Re-order imports
[rust-lightning] / lightning / src / ln / functional_tests.rs
// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Tests standing up a network of ChannelManagers, creating channels, sending
//! payments/messages between them, and often checking that the resulting ChannelMonitors are able
//! to claim outputs on-chain.

use crate::chain;
use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
use crate::chain::chaininterface::LowerBoundedFeeEstimator;
use crate::chain::channelmonitor;
use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use crate::chain::transaction::OutPoint;
use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
use crate::ln::types::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
use crate::ln::{chan_utils, onion_utils};
use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
use crate::routing::gossip::{NetworkGraph, NetworkUpdate};
use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route, RouteParameters};
use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use crate::util::test_channel_signer::TestChannelSigner;
use crate::util::test_utils::{self, WatchtowerPersister};
use crate::util::errors::APIError;
use crate::util::ser::{Writeable, ReadableArgs};
use crate::util::string::UntrustedString;
use crate::util::config::{UserConfig, MaxDustHTLCExposure};

use bitcoin::hash_types::BlockHash;
use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::blockdata::script::{Builder, ScriptBuf};
use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::network::Network;
use bitcoin::{Amount, Sequence, Transaction, TxIn, TxOut, Witness};
use bitcoin::OutPoint as BitcoinOutPoint;
use bitcoin::transaction::Version;

use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::{PublicKey, SecretKey};

use crate::io;
use crate::prelude::*;
use alloc::collections::BTreeSet;
use core::iter::repeat;
use bitcoin::hashes::Hash;
use crate::sync::{Arc, Mutex, RwLock};

use crate::ln::functional_test_utils::*;
use crate::ln::chan_utils::CommitmentTransaction;

use super::channel::UNFUNDED_CHANNEL_AGE_LIMIT_TICKS;

#[test]
fn test_channel_resumption_fail_post_funding() {
	// If we fail to exchange funding with a peer prior to it disconnecting we'll resume the
	// channel open on reconnect. However, if we do exchange funding we do not currently support
	// replaying it, so here we test that the channel closes.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 0, 42, None, None).unwrap();
	let open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan);
	let accept_chan = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan);

	let (temp_chan_id, tx, funding_output) =
		create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
	let new_chan_id = ChannelId::v1_from_funding_outpoint(funding_output);
	nodes[0].node.funding_transaction_generated(&temp_chan_id, &nodes[1].node.get_our_node_id(), tx).unwrap();

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(new_chan_id, true, ClosureReason::DisconnectedPeer)]);

	// After ddf75afd16 we'd panic on reconnection if we exchanged funding info, so test that
	// explicitly here.
	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new());
}

#[test]
fn test_insane_channel_opens() {
	// Stand up a network of 2 nodes
	use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS;
	let mut cfg = UserConfig::default();
	cfg.channel_handshake_limits.max_funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1;
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(cfg)]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Instantiate channel parameters where we push the maximum msats given our
	// funding satoshis
	let channel_value_sat = 31337; // same as funding satoshis
	let channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg);
	let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
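	// As a worked sketch of the line above (assuming the default handshake config's ~1%
	// reserve with a 1000-sat floor): for a 31337-sat channel the reserve is
	// max(313, 1000) = 1000 sats, so push_msat = (31337 - 1000) * 1000 = 30_337_000 msat,
	// the largest push that still leaves the reserve intact.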

	// Have node0 initiate a channel to node1 with aforementioned parameters
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_sat, push_msat, 42, None, None).unwrap();

	// Extract the channel open message from node0 to node1
	let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	// Test helper that asserts we get the correct error string given a mutator
	// that supposedly makes the channel open message insane
	let insane_open_helper = |expected_error_str: &str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &message_mutator(open_channel_message.clone()));
		let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(msg_events.len(), 1);
		let expected_regex = regex::Regex::new(expected_error_str).unwrap();
		if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
			match action {
				&ErrorAction::SendErrorMessage { .. } => {
					nodes[1].logger.assert_log_regex("lightning::ln::channelmanager", expected_regex, 1);
				},
				_ => panic!("unexpected event!"),
			}
		} else { assert!(false); }
	};

	use crate::ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;

	// Test all mutations that would make the channel open message insane
	insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
	insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });

	insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.common_fields.funding_satoshis + 1; msg });

	insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });

	insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.common_fields.dust_limit_satoshis = msg.common_fields.funding_satoshis + 1; msg });

	insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.common_fields.htlc_minimum_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });

	insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.common_fields.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });

	insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.common_fields.max_accepted_htlcs = 0; msg });

	insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.common_fields.max_accepted_htlcs = 484; msg });
}

#[test]
fn test_funding_exceeds_no_wumbo_limit() {
	// Test that if a peer does not support wumbo channels, we'll refuse to open a wumbo channel to
	// them.
	use crate::ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO;
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	*node_cfgs[1].override_init_features.borrow_mut() = Some(channelmanager::provided_init_features(&test_default_channel_config()).clear_wumbo());
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	match nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None, None) {
		Err(APIError::APIMisuseError { err }) => {
			assert_eq!(format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1), err);
		},
		_ => panic!()
	}
}

fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
	// A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure,
	// but only for them. Because some LSPs do it with some level of trust of the clients (for a
	// substantial UX improvement), we explicitly allow it. Because it's unlikely to happen often
	// in normal testing, we test it explicitly here.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let default_config = UserConfig::default();

	// Have node0 initiate a channel to node1 with aforementioned parameters
	let mut push_amt = 100_000_000;
	let feerate_per_kw = 253;
	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
	push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(&channel_type_features) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
	push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
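	// A worked sketch of the fee deduction above (assuming LDK's non-anchor weights of 724
	// for the commitment tx base and 172 per HTLC): 253 * (724 + 4 * 172) / 1000 * 1000 =
	// 357_000 msat, where the trailing `/ 1000 * 1000` rounds the fee down to a whole
	// satoshi expressed in msat.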

	let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None, None).unwrap();
	let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	if !send_from_initiator {
		open_channel_message.channel_reserve_satoshis = 0;
		open_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000;
	}
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);

	// Extract the channel accept message from node1 to node0
	let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
	if send_from_initiator {
		accept_channel_message.channel_reserve_satoshis = 0;
		accept_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000;
	}
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
	{
		let sender_node = if send_from_initiator { &nodes[1] } else { &nodes[0] };
		let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
		let mut sender_node_per_peer_lock;
		let mut sender_node_peer_state_lock;

		let channel_phase = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
		match channel_phase {
			ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
				let chan_context = channel_phase.context_mut();
				chan_context.holder_selected_channel_reserve_satoshis = 0;
				chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000;
			},
			_ => assert!(false),
		}
	}

	let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id);
	let funding_msgs = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx);
	create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_msgs.0);

	// nodes[0] should now be able to send the full balance to nodes[1], violating nodes[1]'s
	// security model if it ever tries to send funds back to nodes[0] (but that's not our problem).
	if send_from_initiator {
		send_payment(&nodes[0], &[&nodes[1]], 100_000_000
			// Note that for outbound channels we have to consider the commitment tx fee and the
			// "fee spike buffer", which is currently a multiple of the total commitment tx fee as
			// well as an additional HTLC.
			- FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, &channel_type_features));
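		// Sketch of that buffer (assuming FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE = 2 and the
		// non-anchor weights above): commit_tx_fee_msat(253, 2, ..) = 253 * (724 + 2 * 172)
		// / 1000 * 1000 = 270_000 msat, so 2 * 270_000 = 540_000 msat is held back.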
	} else {
		send_payment(&nodes[1], &[&nodes[0]], push_amt);
	}
}

#[test]
fn test_counterparty_no_reserve() {
	do_test_counterparty_no_reserve(true);
	do_test_counterparty_no_reserve(false);
}

#[test]
fn test_async_inbound_update_fee() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);

	// A                                        B
	// update_fee                            ->
	// send (1) commitment_signed            -.
	//                                       <- update_add_htlc/commitment_signed
	// send (2) RAA (awaiting remote revoke) -.
	// (1) commitment_signed is delivered    ->
	//                                       .- send (3) RAA (awaiting remote revoke)
	// (2) RAA is delivered                  ->
	//                                       .- send (4) commitment_signed
	//                                       <- (3) RAA is delivered
	// send (5) commitment_signed            -.
	//                                       <- (4) commitment_signed is delivered
	// send (6) RAA                          -.
	// (5) commitment_signed is delivered    ->
	//                                       <- RAA
	// (6) RAA is delivered                  ->
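	//
	// (Notation gloss: "-." / ".-" marks a message generated and queued on A's / B's side;
	// "->" / "<-" marks delivery of a message to the peer.)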

	// First nodes[0] generates an update_fee
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] { // (1)
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	// ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000);
	nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[1], 1);

	let payment_event = {
		let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events_1.len(), 1);
		SendEvent::from_event(events_1.remove(0))
	};
	assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
	assert_eq!(payment_event.msgs.len(), 1);

	// ...now when the messages get delivered everyone should be happy
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
	let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	// deliver (1), generate (3):
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[1], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack); // deliver (2)
	let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(bs_update.update_add_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fail_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fee.is_none()); // (4)
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); // deliver (3)
	let as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	assert!(as_update.update_add_htlcs.is_empty()); // (5)
	assert!(as_update.update_fulfill_htlcs.is_empty()); // (5)
	assert!(as_update.update_fail_htlcs.is_empty()); // (5)
	assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5)
	assert!(as_update.update_fee.is_none()); // (5)
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed); // deliver (4)
	let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// only (6) so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.commitment_signed); // deliver (5)
	let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
	check_added_monitors!(nodes[0], 1);

	let events_2 = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events_2.len(), 1);
	match events_2[0] {
		Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment
		_ => panic!("Unexpected event"),
	}

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke); // deliver (6)
	check_added_monitors!(nodes[1], 1);
}

#[test]
fn test_update_fee_unordered_raa() {
	// Just the intro to the previous test followed by an out-of-order RAA (which caused a
	// crash in an earlier version of the update_fee patch)
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);

	// First nodes[0] generates an update_fee
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let update_msg = match events_0[0] { // (1)
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
			update_fee.as_ref()
		},
		_ => panic!("Unexpected event"),
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	// ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000);
	nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[1], 1);

	let payment_event = {
		let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events_1.len(), 1);
		SendEvent::from_event(events_1.remove(0))
	};
	assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
	assert_eq!(payment_event.msgs.len(), 1);

	// ...now when the messages get delivered everyone should be happy
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
	let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg); // deliver (2)
	check_added_monitors!(nodes[1], 1);

	// We can't continue, sadly, because our (1) now has a bogus signature
}

#[test]
fn test_multi_flight_update_fee() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// A                                        B
	// update_fee/commitment_signed          ->
	//                                       .- send (1) RAA and (2) commitment_signed
	// update_fee (never committed)          ->
	// (3) update_fee                        ->
	// We have to manually generate the above update_fee; it is allowed by the protocol, but we
	// don't track which updates correspond to which revoke_and_ack responses, so we're in
	// AwaitingRAA mode and will not generate the update_fee yet.
	//                                       <- (1) RAA delivered
	// (3) is generated and send (4) CS      -.
	// Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't
	// know the per_commitment_point to use for it.
	//                                       <- (2) commitment_signed delivered
	// revoke_and_ack                        ->
	//                                          B should send no response here
	// (4) commitment_signed delivered       ->
	//                                       <- RAA/commitment_signed delivered
	// revoke_and_ack                        ->

	// First nodes[0] generates an update_fee
	let initial_feerate;
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		initial_feerate = *feerate_lock;
		*feerate_lock = initial_feerate + 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1)
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
			(update_fee.as_ref().unwrap(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};

	// Deliver first update_fee/commitment_signed pair, generating (1) and (2):
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1);
	let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	// nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment
	// transaction:
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock = initial_feerate + 40;
	}
	nodes[0].node.timer_tick_occurred();
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// Create the (3) update_fee message that nodes[0] will generate before it does...
	let mut update_msg_2 = msgs::UpdateFee {
		channel_id: update_msg_1.channel_id.clone(),
		feerate_per_kw: (initial_feerate + 30) as u32,
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);

	update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32;
	// Deliver (3)
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);

	// Deliver (1), generating (3) and (4)
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg);
	let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	check_added_monitors!(nodes[0], 1);
	assert!(as_second_update.update_add_htlcs.is_empty());
	assert!(as_second_update.update_fulfill_htlcs.is_empty());
	assert!(as_second_update.update_fail_htlcs.is_empty());
	assert!(as_second_update.update_fail_malformed_htlcs.is_empty());
	// Check that the update_fee newly generated matches what we delivered:
	assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id);
	assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw);

	// Deliver (2) commitment_signed
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
	let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	check_added_monitors!(nodes[0], 1);
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	// Deliver (4)
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
	let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment);
	let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);
}

fn do_test_sanity_on_in_flight_opens(steps: u8) {
	// Previously, we had issues deserializing channels when we hadn't connected the first block
	// after creation. To catch that and similar issues, we lean on the Node::drop impl to test
	// serialization round-trips and simply do steps towards opening a channel and then drop the
	// Node objects.
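	//
	// A note on the `steps` encoding (inferred from the checks below): the low nibble selects
	// how far through the open flow to run before returning, and the high bit (0b1000_0000)
	// requests that a dummy block be connected before starting.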

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	if steps & 0b1000_0000 != 0 {
		let block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
		connect_block(&nodes[0], &block);
		connect_block(&nodes[1], &block);
	}

	if steps & 0x0f == 0 { return; }
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
	let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	if steps & 0x0f == 1 { return; }
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
	let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());

	if steps & 0x0f == 2 { return; }
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);

	let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);

	if steps & 0x0f == 3 { return; }
	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
	check_added_monitors!(nodes[0], 0);
	let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());

	if steps & 0x0f == 4 { return; }
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
	{
		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 1);
		assert_eq!(added_monitors[0].0, funding_output);
		added_monitors.clear();
	}
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());

	if steps & 0x0f == 5 { return; }
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
	{
		let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 1);
		assert_eq!(added_monitors[0].0, funding_output);
		added_monitors.clear();
	}

	expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
	let events_4 = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events_4.len(), 0);

	if steps & 0x0f == 6 { return; }
	create_chan_between_nodes_with_value_confirm_first(&nodes[0], &nodes[1], &tx, 2);

	if steps & 0x0f == 7 { return; }
	confirm_transaction_at(&nodes[0], &tx, 2);
	connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
	create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
	expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
}

#[test]
fn test_sanity_on_in_flight_opens() {
	do_test_sanity_on_in_flight_opens(0);
	do_test_sanity_on_in_flight_opens(0 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(1);
	do_test_sanity_on_in_flight_opens(1 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(2);
	do_test_sanity_on_in_flight_opens(2 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(3);
	do_test_sanity_on_in_flight_opens(3 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(4);
	do_test_sanity_on_in_flight_opens(4 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(5);
	do_test_sanity_on_in_flight_opens(5 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(6);
	do_test_sanity_on_in_flight_opens(6 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(7);
	do_test_sanity_on_in_flight_opens(7 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(8);
	do_test_sanity_on_in_flight_opens(8 | 0b1000_0000);
}

#[test]
fn test_update_fee_vanilla() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 25;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
	let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);
}

#[test]
fn test_update_fee_that_funder_cannot_afford() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_value = 5000;
	let push_sats = 700;
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000);
	let channel_id = chan.2;
	let secp_ctx = Secp256k1::new();
	let default_config = UserConfig::default();
	let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);

	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();

	// Calculate the maximum feerate that A can afford. Note that we won't send an update_fee
	// that would leave us unable to afford CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLCs on top of
	// the commitment tx fee before actually running out of local balance, so we calculate two
	// different feerates here - the expected local limit as well as the expected remote limit.
	let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(&channel_type_features) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32;
	let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(&channel_type_features)) as u32;
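	// Worked numbers for the remote limit (assuming the default ~1% reserve floored at
	// 1000 sats and a 724-weight non-anchor commitment tx base):
	// non_buffer_feerate = (5000 - 1000 - 700) * 1000 / 724 = 4558 sat/kW, rounded down.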
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock = feerate;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);
	let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg.update_fee.unwrap());

	commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false);

	// Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate set above.
	{
		let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone();

		// We made sure neither party's funds are below the dust limit and there are no HTLCs here
		assert_eq!(commitment_tx.output.len(), 2);
		let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000;
		let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value.to_sat());
		actual_fee = channel_value - actual_fee;
		assert_eq!(total_fee, actual_fee);
	}

	{
		// Increment the feerate by a small constant, accounting for rounding errors
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 4;
	}
	nodes[0].node.timer_tick_occurred();
	nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
	check_added_monitors!(nodes[0], 0);

	const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654; // 2^48 - 2, as commitment numbers count down from 2^48 - 1

	// Get the TestChannelSigner for each channel, which will be used to (1) get the keys
	// needed to sign the new commitment tx and (2) sign the new commitment tx.
	let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let chan_signer = local_chan.get_signer();
		let pubkeys = chan_signer.as_ref().pubkeys();
		(pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
		 pubkeys.funding_pubkey)
	};
	let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
		let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let chan_signer = remote_chan.get_signer();
		let pubkeys = chan_signer.as_ref().pubkeys();
		(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
		 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
		 pubkeys.funding_pubkey)
	};

	// Assemble the set of keys we can use for signatures for our commitment_signed message.
	let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
		&remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);

	let res = {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let local_chan_signer = local_chan.get_signer();
		let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![];
		let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
			INITIAL_COMMITMENT_NUMBER - 1,
			push_sats,
			channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000,
			local_funding, remote_funding,
			commit_tx_keys.clone(),
			non_buffer_feerate + 4,
			&mut htlcs,
			&local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
		);
		local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), Vec::new(), &secp_ctx).unwrap()
	};

	let commit_signed_msg = msgs::CommitmentSigned {
		channel_id: chan.2,
		signature: res.0,
		htlc_signatures: res.1,
		#[cfg(taproot)]
		partial_signature_with_nonce: None,
	};

	let update_fee = msgs::UpdateFee {
		channel_id: chan.2,
		feerate_per_kw: non_buffer_feerate + 4,
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_fee);

	// While producing the commitment_signed response after handling a received update_fee request,
	// the check that the funder, who sent the update_fee request, can afford the new fee
	// (funder_balance >= fee + channel_reserve) should produce an error.
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
	nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee", 3);
	check_added_monitors!(nodes[1], 1);
	check_closed_broadcast!(nodes[1], true);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") },
		[nodes[0].node.get_our_node_id()], channel_value);
}

#[test]
fn test_update_fee_with_fundee_update_add_htlc() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);

	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 800000);

	// nothing happens since nodes[1] is in AwaitingRemoteRevoke
	nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	{
		let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 0);
		added_monitors.clear();
	}
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	// nodes[1] has nothing to do
852
853         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
854         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
855         check_added_monitors!(nodes[0], 1);
856
857         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
858         let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
859         // No commitment_signed so get_event_msg's assert(len == 1) passes
860         check_added_monitors!(nodes[0], 1);
861         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
862         check_added_monitors!(nodes[1], 1);
863         // AwaitingRemoteRevoke ends here
864
865         let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
866         assert_eq!(commitment_update.update_add_htlcs.len(), 1);
867         assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0);
868         assert_eq!(commitment_update.update_fail_htlcs.len(), 0);
869         assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0);
870         assert_eq!(commitment_update.update_fee.is_none(), true);

	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke);
	check_added_monitors!(nodes[0], 1);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	expect_pending_htlcs_forwardable!(nodes[0]);

	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentClaimable { .. } => { },
		_ => panic!("Unexpected event"),
	};

	claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage);

	send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
	close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
	check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}

#[test]
fn test_update_fee() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
	let channel_id = chan.2;

	// A                                        B
	// (1) update_fee/commitment_signed      ->
	//                                       <- (2) revoke_and_ack
	//                                       .- send (3) commitment_signed
	// (4) update_fee/commitment_signed      ->
	//                                       .- send (5) revoke_and_ack (no CS as we're awaiting a revoke)
	//                                       <- (3) commitment_signed delivered
	// send (6) revoke_and_ack               -.
	//                                       <- (5) deliver revoke_and_ack
	// (6) deliver revoke_and_ack            ->
	//                                       .- send (7) commitment_signed in response to (4)
	//                                       <- (7) deliver commitment_signed
	// revoke_and_ack                        ->
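	// (In this diagram, `->`/`<-` mark a message being delivered to the other side, while
	// `.-`/`-.` mark the point at which a node *generates* a message that is only delivered
	// at the later step labelled "deliver".)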

	// Create and deliver (1)...
	let feerate;
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		feerate = *feerate_lock;
		*feerate_lock = feerate + 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	// Generate (2) and (3):
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	// Deliver (2):
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	// Create and deliver (4)...
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock = feerate + 30;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);
	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	check_added_monitors!(nodes[1], 1);
	// ... creating (5)
	let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	// Handle (3), creating (6):
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0);
	check_added_monitors!(nodes[0], 1);
	let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	// Deliver (5):
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	// Deliver (6), creating (7):
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0);
	let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(commitment_update.update_add_htlcs.is_empty());
	assert!(commitment_update.update_fulfill_htlcs.is_empty());
	assert!(commitment_update.update_fail_htlcs.is_empty());
	assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
	assert!(commitment_update.update_fee.is_none());
	check_added_monitors!(nodes[1], 1);

	// Deliver (7)
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30);
	assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30);
	close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
	check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}

#[test]
fn fake_network_test() {
	// Simple test which builds a network of ChannelManagers, connects them to each other, and
	// tests that payments get routed and transactions broadcast in semi-reasonable ways.
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	// Create some initial channels
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
	let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);

	// Rebalance the network a bit by relaying one payment through all the channels...
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);

	// Send some more payments
	send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000);
	send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000);
	send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000);

	// Test failure packets
	let payment_hash_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 1000000).1;
	fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1);

	// Add a new channel from 1 directly to 3, skipping node 2
	let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3);

	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
	send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
	send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
	send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
	send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
	send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
	send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);

	// Do some rebalance loop payments, simultaneously
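	// The router won't produce a route that starts and ends at the same node, so the circular
	// rebalance route nodes[1] -> nodes[2] -> nodes[3] -> nodes[1] is assembled by hand below.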
	let mut hops = Vec::with_capacity(3);
	hops.push(RouteHop {
		pubkey: nodes[2].node.get_our_node_id(),
		node_features: NodeFeatures::empty(),
		short_channel_id: chan_2.0.contents.short_channel_id,
		channel_features: ChannelFeatures::empty(),
		fee_msat: 0,
		cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32,
		maybe_announced_channel: true,
	});
	hops.push(RouteHop {
		pubkey: nodes[3].node.get_our_node_id(),
		node_features: NodeFeatures::empty(),
		short_channel_id: chan_3.0.contents.short_channel_id,
		channel_features: ChannelFeatures::empty(),
		fee_msat: 0,
		cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32,
		maybe_announced_channel: true,
	});
	hops.push(RouteHop {
		pubkey: nodes[1].node.get_our_node_id(),
		node_features: nodes[1].node.node_features(),
		short_channel_id: chan_4.0.contents.short_channel_id,
		channel_features: nodes[1].node.channel_features(),
		fee_msat: 1000000,
		cltv_expiry_delta: TEST_FINAL_CLTV,
		maybe_announced_channel: true,
	});
	hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
	hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
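	// Per BOLT 7, each intermediate hop charges its advertised fee_base_msat plus
	// fee_proportional_millionths millionths of the amount it forwards, so hop fees are
	// filled in back-to-front from the amount owed to the following hop.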
	let payment_preimage_1 = send_along_route(&nodes[1],
		Route { paths: vec![Path { hops, blinded_tail: None }], route_params: None },
			&vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;

	let mut hops = Vec::with_capacity(3);
	hops.push(RouteHop {
		pubkey: nodes[3].node.get_our_node_id(),
		node_features: NodeFeatures::empty(),
		short_channel_id: chan_4.0.contents.short_channel_id,
		channel_features: ChannelFeatures::empty(),
		fee_msat: 0,
		cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32,
		maybe_announced_channel: true,
	});
	hops.push(RouteHop {
		pubkey: nodes[2].node.get_our_node_id(),
		node_features: NodeFeatures::empty(),
		short_channel_id: chan_3.0.contents.short_channel_id,
		channel_features: ChannelFeatures::empty(),
		fee_msat: 0,
		cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32,
		maybe_announced_channel: true,
	});
	hops.push(RouteHop {
		pubkey: nodes[1].node.get_our_node_id(),
		node_features: nodes[1].node.node_features(),
		short_channel_id: chan_2.0.contents.short_channel_id,
		channel_features: nodes[1].node.channel_features(),
		fee_msat: 1000000,
		cltv_expiry_delta: TEST_FINAL_CLTV,
		maybe_announced_channel: true,
	});
	hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
	hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
	let payment_hash_2 = send_along_route(&nodes[1],
		Route { paths: vec![Path { hops, blinded_tail: None }], route_params: None },
			&vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;

	// Claim the rebalances...
	fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
	claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);

	// Close down the channels...
	close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
	check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
	close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
	check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
	check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[3], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
	close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
	check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[3], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}

#[test]
fn holding_cell_htlc_counting() {
	// Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs
	// to ensure we don't end up with HTLCs sitting around in our holding cell for several
	// commitment dance rounds.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);

	// Fetch a route in advance, as routing will fail once the channel has no capacity left to send.
	let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);

	let mut payments = Vec::new();
	for _ in 0..50 { // 50 == OUR_MAX_HTLCS, the most HTLCs we may have in-flight at once
		let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
		nodes[1].node.send_payment_with_route(&route, payment_hash,
			RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
		payments.push((payment_preimage, payment_hash));
	}
	check_added_monitors!(nodes[1], 1);

	let mut events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let initial_payment_event = SendEvent::from_event(events.pop().unwrap());
	assert_eq!(initial_payment_event.node_id, nodes[2].node.get_our_node_id());

	// There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs in
	// the holding cell waiting on B's RAA to send. At this point we should not be able to add
	// another HTLC.
	{
		unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, payment_hash_1,
				RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
			), true, APIError::ChannelUnavailable { .. }, {});
		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	}

	// This should also be true if we try to forward a payment.
	let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
	{
		nodes[0].node.send_payment_with_route(&route, payment_hash_2,
			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
	// We have to forward pending HTLCs twice - the first pass attempts to forward the payment
	// onward (and fails), and the second processes the resulting failure, failing the HTLC
	// backwards.
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
	check_added_monitors!(nodes[1], 1);

	let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true);

	expect_payment_failed_with_update!(nodes[0], payment_hash_2, false, chan_2.0.contents.short_channel_id, false);

	// Now forward all the pending HTLCs and claim them back
	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &initial_payment_event.msgs[0]);
	nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &initial_payment_event.commitment_msg);
	check_added_monitors!(nodes[2], 1);

	let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
	check_added_monitors!(nodes[1], 1);
	let as_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());

	nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());

	for update in as_updates.update_add_htlcs.iter() {
		nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update);
	}
	nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_updates.commitment_signed);
	check_added_monitors!(nodes[2], 1);
	nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[2], 1);
	let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());

	nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
	check_added_monitors!(nodes[1], 1);
	nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());

	nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_final_raa);
	check_added_monitors!(nodes[2], 1);

	expect_pending_htlcs_forwardable!(nodes[2]);

	let events = nodes[2].node.get_and_clear_pending_events();
	assert_eq!(events.len(), payments.len());
	for (event, &(_, ref hash)) in events.iter().zip(payments.iter()) {
		match event {
			&Event::PaymentClaimable { ref payment_hash, .. } => {
				assert_eq!(*payment_hash, *hash);
			},
			_ => panic!("Unexpected event"),
		};
	}

	for (preimage, _) in payments.drain(..) {
		claim_payment(&nodes[1], &[&nodes[2]], preimage);
	}

	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
}

#[test]
fn duplicate_htlc_test() {
	// Test that we accept duplicate-payment_hash HTLCs across the network and that
	// claiming or failing each one is handled separately, without affecting the others
	let chanmon_cfgs = create_chanmon_cfgs(6);
	let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
	let mut nodes = create_network(6, &node_cfgs, &node_chanmgrs);

	// Create some initial channels to route via 3 to 4/5 from 0/1/2
	create_announced_chan_between_nodes(&nodes, 0, 3);
	create_announced_chan_between_nodes(&nodes, 1, 3);
	create_announced_chan_between_nodes(&nodes, 2, 3);
	create_announced_chan_between_nodes(&nodes, 3, 4);
	create_announced_chan_between_nodes(&nodes, 3, 5);

	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);

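	// The test framework derives each payment preimage from a global counter, so winding the
	// counter back one makes the next route_payment call reuse the preimage (and hash) above.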
	*nodes[0].network_payment_count.borrow_mut() -= 1;
	assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage);

	*nodes[0].network_payment_count.borrow_mut() -= 1;
	assert_eq!(route_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], 1000000).0, payment_preimage);

	claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage);
	fail_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], payment_hash);
	claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage);
}

#[test]
fn test_duplicate_htlc_different_direction_onchain() {
	// Test that ChannelMonitor doesn't generate two preimage transactions when we have two
	// HTLCs with the same preimage that cross a node in opposite directions, even with the
	// same payment secret.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);

	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);

	let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], 800_000);
	let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
	send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], 800_000, payment_hash, node_a_payment_secret);

	// Provide preimage to node 0 by claiming payment
	nodes[0].node.claim_funds(payment_preimage);
	expect_payment_claimed!(nodes[0], payment_hash, 800_000);
	check_added_monitors!(nodes[0], 1);

	// Broadcast node 1 commitment txn
	let remote_txn = get_local_commitment_txn!(nodes[1], chan_1.2);

	assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound
	let mut has_both_htlcs = 0; // count the HTLC outputs to check both committed HTLCs are present
	for outp in remote_txn[0].output.iter() {
		if outp.value.to_sat() == 800_000 / 1000 {
			has_both_htlcs += 1;
		} else if outp.value.to_sat() == 900_000 / 1000 {
			has_both_htlcs += 1;
		}
	}
	assert_eq!(has_both_htlcs, 2);

	mine_transaction(&nodes[0], &remote_txn[0]);
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
	connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires

	let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
	assert_eq!(claim_txn.len(), 3);

	check_spends!(claim_txn[0], remote_txn[0]); // Immediate HTLC claim with preimage
	check_spends!(claim_txn[1], remote_txn[0]);
	check_spends!(claim_txn[2], remote_txn[0]);
	let preimage_tx = &claim_txn[0];
	let (preimage_bump_tx, timeout_tx) = if claim_txn[1].input[0].previous_output == preimage_tx.input[0].previous_output {
		(&claim_txn[1], &claim_txn[2])
	} else {
		(&claim_txn[2], &claim_txn[1])
	};

	assert_eq!(preimage_tx.input.len(), 1);
	assert_eq!(preimage_bump_tx.input.len(), 1);

	assert_eq!(preimage_tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx
	assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value.to_sat(), 800);

	assert_eq!(timeout_tx.input.len(), 1);
	assert_eq!(timeout_tx.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx
	check_spends!(timeout_tx, remote_txn[0]);
	assert_eq!(remote_txn[0].output[timeout_tx.input[0].previous_output.vout as usize].value.to_sat(), 900);

	let events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 3);
	for e in events {
		match e {
			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
			MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::DisconnectPeer { ref msg } } => {
				assert_eq!(node_id, nodes[1].node.get_our_node_id());
				assert_eq!(msg.as_ref().unwrap().data, "Channel closed because commitment or closing transaction was confirmed on chain.");
			},
			MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
				assert!(update_add_htlcs.is_empty());
				assert!(update_fail_htlcs.is_empty());
				assert_eq!(update_fulfill_htlcs.len(), 1);
				assert!(update_fail_malformed_htlcs.is_empty());
				assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
			},
			_ => panic!("Unexpected event"),
		}
	}
}

#[test]
fn test_basic_channel_reserve() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);

	let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
	let channel_reserve = chan_stat.channel_reserve_msat;

	// The 2* and +1 are for the fee spike reserve.
	let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, &get_channel_type_features!(nodes[0], nodes[1], chan.2));
	let max_can_send = 5000000 - channel_reserve - commit_tx_fee;
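	// A quick sanity check of the arithmetic above (a sketch assuming the 253 sat/kW default
	// test feerate, the default 1% reserve, and non-anchor weights): the reserve is
	// 1_000_000 msat and the buffered commitment fee is 2 * (724 + 2 * 172) * 253 / 1000
	// sats = 540_000 msat, leaving 3_460_000 msat of nodes[0]'s 5_000_000 msat spendable.
	assert_eq!(max_can_send, 3_460_000);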
	let (mut route, our_payment_hash, _, our_payment_secret) =
		get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
	route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
	let err = nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).err().unwrap();
	match err {
		PaymentSendFailure::AllFailedResendSafe(ref fails) => {
			if let &APIError::ChannelUnavailable { .. } = &fails[0] {}
			else { panic!("Unexpected error variant"); }
		},
		_ => panic!("Unexpected error variant"),
	}
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	send_payment(&nodes[0], &vec![&nodes[1]], max_can_send);
}

#[test]
fn test_fee_spike_violation_fails_htlc() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);

	let (mut route, payment_hash, _, payment_secret) =
		get_route_and_payment_hash!(nodes[0], nodes[1], 3460000);
	route.paths[0].hops[0].fee_msat += 1;
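	// 3_460_000 msat is exactly nodes[0]'s spendable balance once its reserve and the doubled
	// (fee spike buffer) commitment fee are set aside (see test_basic_channel_reserve above);
	// the extra msat added here is what will trip nodes[1]'s fee spike buffer check.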
	// Need to manually create the update_add_htlc message to bypass the channel reserve check in send_htlc()
	let secp_ctx = Secp256k1::new();
	let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!");

	let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;

	let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
	let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret);
	let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
		3460001, &recipient_onion_fields, cur_height, &None).unwrap();
	let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
	let msg = msgs::UpdateAddHTLC {
		channel_id: chan.2,
		htlc_id: 0,
		amount_msat: htlc_msat,
		payment_hash,
		cltv_expiry: htlc_cltv,
		onion_routing_packet: onion_packet,
		skimmed_fee_msat: None,
		blinding_point: None,
	};

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);

	// Now manually create the commitment_signed message corresponding to the update_add
	// nodes[0] just sent. In the code for construction of this message, "local" refers
	// to the sender of the message, and "remote" refers to the receiver.

	let feerate_per_kw = get_feerate!(nodes[0], nodes[1], chan.2);

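	// Commitment numbers count *down* from 2^48 - 1: the first commitment transaction has the
	// highest number, and each subsequent commitment decrements it.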
	const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

	// Get the TestChannelSigner for each channel, which will be used to (1) get the keys
	// needed to sign the new commitment tx and (2) sign the new commitment tx.
	let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let chan_signer = local_chan.get_signer();
		// Make the signer believe we validated another commitment, so we can release the secret
		chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;

		let pubkeys = chan_signer.as_ref().pubkeys();
		(pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
		 chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER),
		 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx),
		 chan_signer.as_ref().pubkeys().funding_pubkey)
	};
	let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
		let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let chan_signer = remote_chan.get_signer();
		let pubkeys = chan_signer.as_ref().pubkeys();
		(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
		 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
		 chan_signer.as_ref().pubkeys().funding_pubkey)
	};

	// Assemble the set of keys we can use for signatures for our commitment_signed message.
	let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
		&remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);

	// Build the remote commitment transaction so we can sign it, and then later use the
	// signature for the commitment_signed message.
	let local_chan_balance = 1313;
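	// 1313 sats is nodes[0]'s balance once the HTLC is applied (a derivation assuming the
	// 253 sat/kW default test feerate and non-anchor weights): 5_000_000 msat, less the
	// 3_460_001 msat HTLC, truncates to 1539 sats, less the 226 sat commitment fee with one
	// non-dust HTLC ((724 + 172) * 253 / 1000).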

	let accepted_htlc_info = chan_utils::HTLCOutputInCommitment {
		offered: false,
		amount_msat: 3460001,
		cltv_expiry: htlc_cltv,
		payment_hash,
		transaction_output_index: Some(1),
	};

	let commitment_number = INITIAL_COMMITMENT_NUMBER - 1;

	let res = {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let local_chan_signer = local_chan.get_signer();
		let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
			commitment_number,
			95000,
			local_chan_balance,
			local_funding, remote_funding,
			commit_tx_keys.clone(),
			feerate_per_kw,
			&mut vec![(accepted_htlc_info, ())],
			&local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
		);
		local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), Vec::new(), &secp_ctx).unwrap()
	};

	let commit_signed_msg = msgs::CommitmentSigned {
		channel_id: chan.2,
		signature: res.0,
		htlc_signatures: res.1,
		#[cfg(taproot)]
		partial_signature_with_nonce: None,
	};

	// Send the commitment_signed message to nodes[1].
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
	let _ = nodes[1].node.get_and_clear_pending_msg_events();

	// Send the RAA to nodes[1].
	let raa_msg = msgs::RevokeAndACK {
		channel_id: chan.2,
		per_commitment_secret: local_secret,
		next_per_commitment_point: next_local_point,
		#[cfg(taproot)]
		next_local_nonce: None,
	};
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_msg);

	let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	// Make sure the HTLC failed in the way we expect.
	match events[0] {
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, .. } => {
			assert_eq!(update_fail_htlcs.len(), 1);
			update_fail_htlcs[0].clone()
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].logger.assert_log("lightning::ln::channel",
		format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1);

	check_added_monitors!(nodes[1], 2);
}

#[test]
fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
	let mut chanmon_cfgs = create_chanmon_cfgs(2);
	// Set the channel up (via a large push_msat) such that the fundee sending any above-dust
	// amount would push the funder below its channel reserve, since the funder pays the added
	// commitment fee. In this test we check that the fundee would be prevented from sending
	// an HTLC in this situation.
	let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let default_config = UserConfig::default();
	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();

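	// Push everything to nodes[1] except a commitment fee buffer (enough fee for
	// MIN_AFFORDABLE_HTLC_COUNT HTLCs) and the channel reserve, leaving nodes[0] exactly at
	// the reserve it must maintain plus that fee buffer.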
	let mut push_amt = 100_000_000;
	push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);

	push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;

	let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);

	// Fetch a route in advance, as routing will fail once the channel has no capacity left to send.
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000);
	// Sending exactly enough to hit the reserve amount should be accepted
	for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
		route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
	}

	// However, one more HTLC should be significantly over the reserve amount and fail.
	unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
			RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
		), true, APIError::ChannelUnavailable { .. }, {});
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
}

#[test]
fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
	let mut chanmon_cfgs = create_chanmon_cfgs(2);
	let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let default_config = UserConfig::default();
	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();

	// Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
	// channel reserve violation (so their balance is their channel reserve (1,000 sats) plus
	// the commitment transaction fee with MIN_AFFORDABLE_HTLC_COUNT HTLCs (357 sats at the
	// default feerate)).
	let mut push_amt = 100_000_000;
	push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
	push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);

	// Send four HTLCs to cover the initial push_msat buffer we're required to include
	for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
		route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
	}

	let (mut route, payment_hash, _, payment_secret) =
		get_route_and_payment_hash!(nodes[1], nodes[0], 1000);
	route.paths[0].hops[0].fee_msat = 700_000;
	// Need to manually create the update_add_htlc message to bypass the channel reserve check in send_htlc()
	let secp_ctx = Secp256k1::new();
	let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
	let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
	let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
	let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret);
	let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
		700_000, &recipient_onion_fields, cur_height, &None).unwrap();
	let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
	let msg = msgs::UpdateAddHTLC {
		channel_id: chan.2,
		htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64,
		amount_msat: htlc_msat,
		payment_hash,
		cltv_expiry: htlc_cltv,
		onion_routing_packet: onion_packet,
		skimmed_fee_msat: None,
		blinding_point: None,
	};

	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
	// Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
	nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value", 3);
	assert_eq!(nodes[0].node.list_channels().len(), 0);
	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
	assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() },
		[nodes[1].node.get_our_node_id()], 100000);
}

#[test]
fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
	// Test that if we receive many dust HTLCs over an outbound channel, they don't count when
	// calculating our commitment transaction fee (this was previously broken).
	let mut chanmon_cfgs = create_chanmon_cfgs(2);
	let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();

	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let default_config = UserConfig::default();
	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();

	// Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
	// channel reserve violation (so their balance is their channel reserve (1,000 sats) plus
	// the commitment transaction fee with MIN_AFFORDABLE_HTLC_COUNT HTLCs (357 sats at the
	// default feerate)).
	let mut push_amt = 100_000_000;
	push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
	push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt);

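	// A received HTLC is "dust" (and thus omitted from the commitment transaction) when its
	// value, less the fee of the HTLC-success transaction that would claim it, falls below the
	// dust limit. The largest such value is therefore the dust limit plus the HTLC-success fee,
	// less one msat, which is what we compute here.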
	let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000
		+ feerate_per_kw as u64 * htlc_success_tx_weight(&channel_type_features) / 1000 * 1000 - 1;
	// In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
	// reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
	// commitment transaction fee.
	route_payment(&nodes[1], &[&nodes[0]], dust_amt);

	// Send four HTLCs to cover the initial push_msat buffer we're required to include
	for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
		route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
	}

	// One more than the dust amt should fail, however.
	let (mut route, our_payment_hash, _, our_payment_secret) =
		get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt);
	route.paths[0].hops[0].fee_msat += 1;
	unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
			RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
		), true, APIError::ChannelUnavailable { .. }, {});
}

#[test]
fn test_chan_init_feerate_unaffordability() {
	// Test that we will reject channel opens which do not leave enough to pay for any HTLCs due to
	// channel reserve and feerate requirements.
	let mut chanmon_cfgs = create_chanmon_cfgs(2);
	let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let default_config = UserConfig::default();
	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();

	// Set the push_msat amount such that nodes[0] will not be able to afford to add even a single
	// HTLC.
	let mut push_amt = 100_000_000;
	push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
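	// With the 253 sat/kW default test feerate and MIN_AFFORDABLE_HTLC_COUNT (4) HTLCs, the
	// commitment fee is (724 + 4 * 172) * 253 / 1000 = 357 sats, so nodes[0] retains exactly
	// 357 sats; pushing one extra msat leaves only 356 sats, which can't cover the fee (hence
	// the amounts in the error below).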
	assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None, None).unwrap_err(),
		APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() });

	// During open, we don't have a "counterparty channel reserve" to check against, so that
	// requirement only comes into play on the open_channel handling side.
	push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None, None).unwrap();
	let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	open_channel_msg.push_msat += 1;
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);

	let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 1);
	match msg_events[0] {
		MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
			assert_eq!(msg.data, "Insufficient funding amount for initial reserve");
		},
		_ => panic!("Unexpected event"),
	}
}

#[test]
fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
	// Test that if we receive many dust HTLCs over an inbound channel, they don't count when
	// calculating our counterparty's commitment transaction fee (this was previously broken).
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000);

	let payment_amt = 46000; // Dust amount
	// In the previous code, these first four payments would succeed.
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);

	// Then these next 5 would be interpreted by nodes[1] as violating the fee spike buffer.
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);

	// And this last payment previously resulted in nodes[1] closing on its inbound-channel
	// counterparty, because it counted all the previous dust HTLCs against nodes[0]'s commitment
	// transaction fee and therefore perceived this next payment as a channel reserve violation.
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
}

#[test]
fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
	let _ = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);

	let feemsat = 239;
	let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
	let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
	let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
	let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);

	// Add a 2* and +1 for the fee spike reserve.
	let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
	let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc)/2;
	let amt_msat_1 = recv_value_1 + total_routing_fee_msat;
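	// recv_value_1 is half of what nodes[0] could send at most, so the first HTLC below goes
	// through while leaving enough headroom that the second, deliberately oversized HTLC
	// crafted further down puts nodes[0] under its reserve from nodes[1]'s point of view.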

	// Add a pending HTLC.
	let (route_1, our_payment_hash_1, _, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_1);
	let payment_event_1 = {
		nodes[0].node.send_payment_with_route(&route_1, our_payment_hash_1,
			RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
		check_added_monitors!(nodes[0], 1);

		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		SendEvent::from_event(events.remove(0))
	};
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);

	// Attempt to trigger a channel reserve violation --> payment failure.
	let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, &channel_type_features);
	let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1;
	let amt_msat_2 = recv_value_2 + total_routing_fee_msat;
	let mut route_2 = route_1.clone();
	route_2.paths[0].hops.last_mut().unwrap().fee_msat = amt_msat_2;

	// Need to manually create the update_add_htlc message to bypass the channel reserve check in send_htlc()
	let secp_ctx = Secp256k1::new();
	let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
	let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
	let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap();
	let recipient_onion_fields = RecipientOnionFields::spontaneous_empty();
	let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
		&route_2.paths[0], recv_value_2, &recipient_onion_fields, cur_height, &None).unwrap();
	let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1).unwrap();
	let msg = msgs::UpdateAddHTLC {
		channel_id: chan.2,
		htlc_id: 1,
		amount_msat: htlc_msat + 1,
		payment_hash: our_payment_hash_1,
		cltv_expiry: htlc_cltv,
		onion_routing_packet: onion_packet,
		skimmed_fee_msat: None,
		blinding_point: None,
	};

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
	// Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
	nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value", 3);
	assert_eq!(nodes[1].node.list_channels().len(), 1);
	let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
	assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() },
		[nodes[0].node.get_our_node_id()], 100000);
}
1840
1841 #[test]
1842 fn test_inbound_outbound_capacity_is_not_zero() {
1843         let chanmon_cfgs = create_chanmon_cfgs(2);
1844         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1845         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1846         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1847         let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1848         let channels0 = node_chanmgrs[0].list_channels();
1849         let channels1 = node_chanmgrs[1].list_channels();
1850         let default_config = UserConfig::default();
1851         assert_eq!(channels0.len(), 1);
1852         assert_eq!(channels1.len(), 1);
1853
1854         let reserve = get_holder_selected_channel_reserve_satoshis(100_000, &default_config);
1855         assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve*1000);
1856         assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve*1000);
1857
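              // Of the 100_000 sat channel, 95_000_000 msat was pushed to nodes[1], leaving
              // nodes[0] with 5_000_000 msat of outbound capacity less its reserve.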
1858         assert_eq!(channels0[0].outbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
1859         assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
1860 }
1861
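// Computes the commitment tx fee (in msat) for a given feerate (sat per 1000 weight) and HTLC
// count: weight times feerate yields sats, and the trailing /1000*1000 truncates to whole sats
// expressed in msat. E.g., assuming the non-anchor base weight of 724 and 172 weight per HTLC,
// 2 HTLCs at feerate 253 give (724 + 2*172) * 253 / 1000 = 270 sats, i.e. 270_000 msat.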
1862 fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, channel_type_features: &ChannelTypeFeatures) -> u64 {
1863         (commitment_tx_base_weight(channel_type_features) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
1864 }
1865
1866 #[test]
1867 fn test_channel_reserve_holding_cell_htlcs() {
1868         let chanmon_cfgs = create_chanmon_cfgs(3);
1869         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1870         // When this test was written, the default base fee floated based on the HTLC count.
1871         // It is now fixed, so we simply set the fee to the expected value here.
1872         let mut config = test_default_channel_config();
1873         config.channel_config.forwarding_fee_base_msat = 239;
1874         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
1875         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1876         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001);
1877         let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 190000, 1001);
1878
1879         let mut stat01 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
1880         let mut stat11 = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
1881
1882         let mut stat12 = get_channel_value_stat!(nodes[1], nodes[2], chan_2.2);
1883         let mut stat22 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2);
1884
1885         macro_rules! expect_forward {
1886                 ($node: expr) => {{
1887                         let mut events = $node.node.get_and_clear_pending_msg_events();
1888                         assert_eq!(events.len(), 1);
1889                         check_added_monitors!($node, 1);
1890                         let payment_event = SendEvent::from_event(events.remove(0));
1891                         payment_event
1892                 }}
1893         }
1894
1895         let feemsat = 239; // set above
1896         let total_fee_msat = (nodes.len() - 2) as u64 * feemsat;
1897         let feerate = get_feerate!(nodes[0], nodes[1], chan_1.2);
1898         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_1.2);
1899
1900         let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat;
1901
1902         // attempt to send amt_msat > their_max_htlc_value_in_flight_msat
1903         {
1904                 let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
1905                         .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
1906                 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0);
1907                 route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
1908                 assert!(route.paths[0].hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
1909
1910                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1911                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1912                         ), true, APIError::ChannelUnavailable { .. }, {});
1913                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1914         }
1915
1916         // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete
1917         // nodes[0]'s wealth
1918         loop {
1919                 let amt_msat = recv_value_0 + total_fee_msat;
1920                 // 3 for the 3 HTLCs that will be sent, 2* and +1 for the fee spike reserve.
1921                 // Also, ensure that each payment has enough to be over the dust limit to
1922                 // ensure it'll be included in each commit tx fee calculation.
1923                 let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
1924                 let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000);
1925                 if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat {
1926                         break;
1927                 }
1928
1929                 let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
1930                         .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
1931                 let route = get_route!(nodes[0], payment_params, recv_value_0).unwrap();
1932                 let (payment_preimage, ..) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0);
1933                 claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
1934
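              // Each round should move the full amount (amount received plus routing fee) out of
              // nodes[0], with nodes[1] keeping the fee and nodes[2] receiving the rest.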
1935                 let (stat01_, stat11_, stat12_, stat22_) = (
1936                         get_channel_value_stat!(nodes[0], nodes[1], chan_1.2),
1937                         get_channel_value_stat!(nodes[1], nodes[0], chan_1.2),
1938                         get_channel_value_stat!(nodes[1], nodes[2], chan_2.2),
1939                         get_channel_value_stat!(nodes[2], nodes[1], chan_2.2),
1940                 );
1941
1942                 assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
1943                 assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
1944                 assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
1945                 assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
1946                 stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
1947         }
1948
1949         // Add a pending output.
1950         // The 2* and +1 HTLCs on the commit tx fee account for the fee spike reserve.
1951         // The reason we divide by two here is as follows: the dividend is the total outbound liquidity
1952         // after fees, the channel reserve, and the fee spike buffer are removed. We eventually want to
1953         // divide this quantity into 3 portions, each of which will be sent in an HTLC. This allows us
1954         // to test the channel reserve policy at the edges of what amount is sendable, i.e.
1955         // cases where 1 msat over X will cause a payment failure, but anything less than
1956         // that can be sent successfully. So, dividing by two is a somewhat arbitrary way of getting
1957         // the amount of the first of these 3 payments. The reason we split into 3 payments
1958         // is to test the behavior of the holding cell with respect to channel reserve and commit tx fee
1959         // policy.
1960         let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
1961         let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2;
1962         let amt_msat_1 = recv_value_1 + total_fee_msat;
1963
1964         let (route_1, our_payment_hash_1, our_payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_1);
1965         let payment_event_1 = {
1966                 nodes[0].node.send_payment_with_route(&route_1, our_payment_hash_1,
1967                         RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
1968                 check_added_monitors!(nodes[0], 1);
1969
1970                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1971                 assert_eq!(events.len(), 1);
1972                 SendEvent::from_event(events.remove(0))
1973         };
1974         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
1975
1976         // Test the channel reserve while one HTLC output is already pending
1977         let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs;
1978         {
1979                 let mut route = route_1.clone();
1980                 route.paths[0].hops.last_mut().unwrap().fee_msat = recv_value_2 + 1;
1981                 let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
1982                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1983                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1984                         ), true, APIError::ChannelUnavailable { .. }, {});
1985                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1986         }
1987
1988         // split the rest to test holding cell
1989         let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
1990         let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs;
1991         let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2;
1992         let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat;
1993         {
1994                 let stat = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
1995                 assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat + commit_tx_fee_3_htlcs), stat.channel_reserve_msat);
1996         }
1997
1998         // now see if they go through on both sides
1999         let (route_21, our_payment_hash_21, our_payment_preimage_21, our_payment_secret_21) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_21);
2000         // ...but this one will get stuck in the holding cell
2001         nodes[0].node.send_payment_with_route(&route_21, our_payment_hash_21,
2002                 RecipientOnionFields::secret_only(our_payment_secret_21), PaymentId(our_payment_hash_21.0)).unwrap();
2003         check_added_monitors!(nodes[0], 0);
2004         let events = nodes[0].node.get_and_clear_pending_events();
2005         assert_eq!(events.len(), 0);
2006
2007         // test with outbound holding cell amount > 0
2008         {
2009                 let (mut route, our_payment_hash, _, our_payment_secret) =
2010                         get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
2011                 route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
2012                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
2013                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
2014                         ), true, APIError::ChannelUnavailable { .. }, {});
2015                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2016         }
2017
2018         let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
2019         // this one will also get stuck in the holding cell
2020         nodes[0].node.send_payment_with_route(&route_22, our_payment_hash_22,
2021                 RecipientOnionFields::secret_only(our_payment_secret_22), PaymentId(our_payment_hash_22.0)).unwrap();
2022         check_added_monitors!(nodes[0], 0);
2023         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
2024         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2025
2026         // flush the pending htlc
2027         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg);
2028         let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2029         check_added_monitors!(nodes[1], 1);
2030
2031         // the pending htlc should be promoted to committed
2032         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
2033         check_added_monitors!(nodes[0], 1);
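              // Handling the RAA also frees nodes[0]'s holding cell, so the update below carries
              // both of the held HTLCs.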
2034         let commitment_update_2 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2035
2036         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed);
2037         let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2038         // No commitment_signed so get_event_msg's assert(len == 1) passes
2039         check_added_monitors!(nodes[0], 1);
2040
2041         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack);
2042         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2043         check_added_monitors!(nodes[1], 1);
2044
2045         expect_pending_htlcs_forwardable!(nodes[1]);
2046
2047         let ref payment_event_11 = expect_forward!(nodes[1]);
2048         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]);
2049         commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
2050
2051         expect_pending_htlcs_forwardable!(nodes[2]);
2052         expect_payment_claimable!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1);
2053
2054         // flush the htlcs in the holding cell
2055         assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
2056         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]);
2057         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]);
2058         commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
2059         expect_pending_htlcs_forwardable!(nodes[1]);
2060
2061         let ref payment_event_3 = expect_forward!(nodes[1]);
2062         assert_eq!(payment_event_3.msgs.len(), 2);
2063         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]);
2064         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]);
2065
2066         commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
2067         expect_pending_htlcs_forwardable!(nodes[2]);
2068
2069         let events = nodes[2].node.get_and_clear_pending_events();
2070         assert_eq!(events.len(), 2);
2071         match events[0] {
2072                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
2073                         assert_eq!(our_payment_hash_21, *payment_hash);
2074                         assert_eq!(recv_value_21, amount_msat);
2075                         assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
2076                         assert_eq!(via_channel_id, Some(chan_2.2));
2077                         match &purpose {
2078                                 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
2079                                         assert!(payment_preimage.is_none());
2080                                         assert_eq!(our_payment_secret_21, *payment_secret);
2081                                 },
2082                                 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
2083                         }
2084                 },
2085                 _ => panic!("Unexpected event"),
2086         }
2087         match events[1] {
2088                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
2089                         assert_eq!(our_payment_hash_22, *payment_hash);
2090                         assert_eq!(recv_value_22, amount_msat);
2091                         assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
2092                         assert_eq!(via_channel_id, Some(chan_2.2));
2093                         match &purpose {
2094                                 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
2095                                         assert!(payment_preimage.is_none());
2096                                         assert_eq!(our_payment_secret_22, *payment_secret);
2097                                 },
2098                                 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
2099                         }
2100                 },
2101                 _ => panic!("Unexpected event"),
2102         }
2103
2104         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
2105         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
2106         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
2107
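              // Finally, drain nodes[0] down to exactly its channel reserve plus the (fee spike
              // buffered) commit tx fee for one HTLC, as asserted below.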
2108         let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, &channel_type_features);
2109         let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat;
2110         send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3);
2111
2112         let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
2113         let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat);
2114         let stat0 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
2115         assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
2116         assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat + commit_tx_fee_1_htlc);
2117
2118         let stat2 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2);
2119         assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3);
2120 }
2121
2122 #[test]
2123 fn channel_reserve_in_flight_removes() {
2124         // In cases where one side claims an HTLC, it thinks it has additional available funds that it
2125         // can send to its counterparty, but due to update ordering, the other side may not yet have
2126         // considered those HTLCs fully removed.
2127         // This tests that we don't count HTLCs which will not be included in the next remote
2128         // commitment transaction towards the reserve value (as it implies no commitment transaction
2129         // will be generated which violates the remote reserve value).
2130         // This was broken previously, and discovered by the chanmon_fail_consistency fuzz test.
2131         // To test this we:
2132         //  * route two HTLCs from A to B (note that, at a high level, this test is checking that, when
2133         //    you consider the values of both of these HTLCs, B may be able to send an HTLC back to A,
2134         //    but if you only consider the value of the first HTLC, it may not),
2135         //  * start routing a third HTLC from A to B,
2136         //  * claim the first two HTLCs (though B will generate an update_fulfill for one, and put
2137         //    the other claim in its holding cell, as it immediately goes into AwaitingRAA),
2138         //  * deliver the first fulfill from B
2139         //  * deliver the update_add and an RAA from A, resulting in B freeing the second holding cell
2140         //    claim,
2141         //  * deliver A's response CS and RAA.
2142         //    This results in A having the second HTLC in AwaitingRemovedRemoteRevoke, but B having
2143         //    removed it fully. B now has the push_msat plus the first two HTLCs in value.
2144         //  * Now B happily sends another HTLC, potentially violating its reserve value from A's point
2145         //    of view (if A counts the AwaitingRemovedRemoteRevoke HTLC).
2146         let chanmon_cfgs = create_chanmon_cfgs(2);
2147         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2148         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2149         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2150         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2151
2152         let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
2153         // Route the first two HTLCs.
2154         let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000;
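              // After claiming the first HTLC, B will sit 10_000 msat below its reserve; claiming
              // the second (20_000 msat) HTLC puts it 10_000 msat above, just enough to send the
              // 10_000 msat payment back to A later.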
2155         let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1);
2156         let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 20_000);
2157
2158         // Start routing the third HTLC (this is just used to get everyone in the right state).
2159         let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
2160         let send_1 = {
2161                 nodes[0].node.send_payment_with_route(&route, payment_hash_3,
2162                         RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
2163                 check_added_monitors!(nodes[0], 1);
2164                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2165                 assert_eq!(events.len(), 1);
2166                 SendEvent::from_event(events.remove(0))
2167         };
2168
2169         // Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an
2170         // initial fulfill/CS.
2171         nodes[1].node.claim_funds(payment_preimage_1);
2172         expect_payment_claimed!(nodes[1], payment_hash_1, payment_value_1);
2173         check_added_monitors!(nodes[1], 1);
2174         let bs_removes = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2175
2176         // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not
2177         // remove the second HTLC when we send the HTLC back from B to A.
2178         nodes[1].node.claim_funds(payment_preimage_2);
2179         expect_payment_claimed!(nodes[1], payment_hash_2, 20_000);
2180         check_added_monitors!(nodes[1], 1);
2181         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2182
2183         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]);
2184         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed);
2185         check_added_monitors!(nodes[0], 1);
2186         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2187         expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
2188
2189         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]);
2190         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg);
2191         check_added_monitors!(nodes[1], 1);
2192         // B is already AwaitingRAA, so it can't generate a CS here
2193         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2194
2195         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2196         check_added_monitors!(nodes[1], 1);
2197         let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2198
2199         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2200         check_added_monitors!(nodes[0], 1);
2201         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2202
2203         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2204         check_added_monitors!(nodes[1], 1);
2205         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2206
2207         // The second HTLC is removed, but as A is in AwaitingRAA it can't generate a CS here, so the
2208         // RAA that B generated above doesn't fully resolve the second HTLC from A's point of view.
2209         // However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
2210         // can no longer broadcast a commitment transaction with it and B has the preimage so can go
2211         // on-chain as necessary).
2212         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]);
2213         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
2214         check_added_monitors!(nodes[0], 1);
2215         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2216         expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
2217
2218         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2219         check_added_monitors!(nodes[1], 1);
2220         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2221
2222         expect_pending_htlcs_forwardable!(nodes[1]);
2223         expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 100000);
2224
2225         // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
2226         // resolve the second HTLC from A's point of view.
2227         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2228         check_added_monitors!(nodes[0], 1);
2229         expect_payment_path_successful!(nodes[0]);
2230         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2231
2232         // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
2233         // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing.
2234         let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[1], nodes[0], 10000);
2235         let send_2 = {
2236                 nodes[1].node.send_payment_with_route(&route, payment_hash_4,
2237                         RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap();
2238                 check_added_monitors!(nodes[1], 1);
2239                 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
2240                 assert_eq!(events.len(), 1);
2241                 SendEvent::from_event(events.remove(0))
2242         };
2243
2244         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_2.msgs[0]);
2245         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_2.commitment_msg);
2246         check_added_monitors!(nodes[0], 1);
2247         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2248
2249         // Now just resolve all the outstanding messages/HTLCs for completeness...
2250
2251         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2252         check_added_monitors!(nodes[1], 1);
2253         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2254
2255         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2256         check_added_monitors!(nodes[1], 1);
2257
2258         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2259         check_added_monitors!(nodes[0], 1);
2260         expect_payment_path_successful!(nodes[0]);
2261         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2262
2263         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2264         check_added_monitors!(nodes[1], 1);
2265         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2266
2267         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2268         check_added_monitors!(nodes[0], 1);
2269
2270         expect_pending_htlcs_forwardable!(nodes[0]);
2271         expect_payment_claimable!(nodes[0], payment_hash_4, payment_secret_4, 10000);
2272
2273         claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
2274         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
2275 }
2276
2277 #[test]
2278 fn channel_monitor_network_test() {
2279         // Simple test which builds a network of ChannelManagers, connects them to each other, and
2280         // tests that ChannelMonitor is able to recover from various states.
2281         let chanmon_cfgs = create_chanmon_cfgs(5);
2282         let node_cfgs = create_node_cfgs(5, &chanmon_cfgs);
2283         let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, None, None, None, None]);
2284         let nodes = create_network(5, &node_cfgs, &node_chanmgrs);
2285
2286         // Create some initial channels
2287         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2288         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
2289         let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
2290         let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
2291
2292         // Make sure all nodes are at the same starting height
2293         connect_blocks(&nodes[0], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
2294         connect_blocks(&nodes[1], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
2295         connect_blocks(&nodes[2], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
2296         connect_blocks(&nodes[3], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[3].best_block_info().1);
2297         connect_blocks(&nodes[4], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[4].best_block_info().1);
2298
2299         // Rebalance the network a bit by relaying one payment through all the channels...
2300         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2301         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2302         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2303         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2304
2305         // Simple case with no pending HTLCs:
2306         let error_message = "Channel force-closed";
2307         nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
2308         check_added_monitors!(nodes[1], 1);
2309         check_closed_broadcast!(nodes[1], true);
2310         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[0].node.get_our_node_id()], 100000);
2311         {
2312                 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
2313                 assert_eq!(node_txn.len(), 1);
2314                 mine_transaction(&nodes[1], &node_txn[0]);
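                      // Some connect styles update the best block before connecting the
                      // transactions, in which case the monitor may re-broadcast its commitment
                      // tx; drain the broadcaster so it doesn't confuse later checks.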
2315                 if nodes[1].connect_style.borrow().updates_best_block_first() {
2316                         let _ = nodes[1].tx_broadcaster.txn_broadcast();
2317                 }
2318
2319                 mine_transaction(&nodes[0], &node_txn[0]);
2320                 check_added_monitors!(nodes[0], 1);
2321                 test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE);
2322         }
2323         check_closed_broadcast!(nodes[0], true);
2324         assert_eq!(nodes[0].node.list_channels().len(), 0);
2325         assert_eq!(nodes[1].node.list_channels().len(), 1);
2326         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2327
2328         // One pending HTLC is discarded by the force-close:
2329         let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
2330
2331         // Simple case: one pending HTLC resolved by an HTLC-Timeout (note that the HTLC-Timeout is not
2332         // broadcast until the timelock expires).
2333         let error_message = "Channel force-closed";
2334         nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id(), error_message.to_string()).unwrap();
2335         check_closed_broadcast!(nodes[1], true);
2336         check_added_monitors!(nodes[1], 1);
2337         {
2338                 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE);
2339                 connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
2340                 test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
2341                 mine_transaction(&nodes[2], &node_txn[0]);
2342                 check_added_monitors!(nodes[2], 1);
2343                 test_txn_broadcast(&nodes[2], &chan_2, Some(node_txn[0].clone()), HTLCType::NONE);
2344         }
2345         check_closed_broadcast!(nodes[2], true);
2346         assert_eq!(nodes[1].node.list_channels().len(), 0);
2347         assert_eq!(nodes[2].node.list_channels().len(), 1);
2348         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[2].node.get_our_node_id()], 100000);
2349         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2350
2351         macro_rules! claim_funds {
2352                 ($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => {
2353                         {
2354                                 $node.node.claim_funds($preimage);
2355                                 expect_payment_claimed!($node, $payment_hash, 3_000_000);
2356                                 check_added_monitors!($node, 1);
2357
2358                                 let events = $node.node.get_and_clear_pending_msg_events();
2359                                 assert_eq!(events.len(), 1);
2360                                 match events[0] {
2361                                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => {
2362                                                 assert!(update_add_htlcs.is_empty());
2363                                                 assert!(update_fail_htlcs.is_empty());
2364                                                 assert_eq!(*node_id, $prev_node.node.get_our_node_id());
2365                                         },
2366                                         _ => panic!("Unexpected event"),
2367                                 };
2368                         }
2369                 }
2370         }
2371
2372         // nodes[3] gets the preimage, but nodes[2] has already disconnected, resulting in an HTLC-Timeout
2373         // from nodes[2] and a claim against it by nodes[3] (plus their respective channel-close announcements)
2374         let error_message = "Channel force-closed";
2375         nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id(), error_message.to_string()).unwrap();
2376         check_added_monitors!(nodes[2], 1);
2377         check_closed_broadcast!(nodes[2], true);
2378         let node2_commitment_txid;
2379         {
2380                 let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
2381                 connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
2382                 test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
2383                 node2_commitment_txid = node_txn[0].txid();
2384
2385                 // Claim the payment on nodes[3], giving it knowledge of the preimage
2386                 claim_funds!(nodes[3], nodes[2], payment_preimage_1, payment_hash_1);
2387                 mine_transaction(&nodes[3], &node_txn[0]);
2388                 check_added_monitors!(nodes[3], 1);
2389                 check_preimage_claim(&nodes[3], &node_txn);
2390         }
2391         check_closed_broadcast!(nodes[3], true);
2392         assert_eq!(nodes[2].node.list_channels().len(), 0);
2393         assert_eq!(nodes[3].node.list_channels().len(), 1);
2394         check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[3].node.get_our_node_id()], 100000);
2395         check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
2396
2397         // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
2398         // confusing us in the following tests.
2399         let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&OutPoint { txid: chan_3.3.txid(), index: 0 });
2400
2401         // One pending HTLC to time out:
2402         let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[3], &[&nodes[4]], 3_000_000);
2403         // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
2404         // buffer space).
2405
2406         let (close_chan_update_1, close_chan_update_2) = {
2407                 connect_blocks(&nodes[3], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
2408                 let events = nodes[3].node.get_and_clear_pending_msg_events();
2409                 assert_eq!(events.len(), 2);
2410                 let close_chan_update_1 = match events[1] {
2411                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2412                                 msg.clone()
2413                         },
2414                         _ => panic!("Unexpected event"),
2415                 };
2416                 match events[0] {
2417                         MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
2418                                 assert_eq!(node_id, nodes[4].node.get_our_node_id());
2419                         },
2420                         _ => panic!("Unexpected event"),
2421                 }
2422                 check_added_monitors!(nodes[3], 1);
2423
2424                 // Clear the fee-bumped claiming txn spending nodes[2]'s commitment tx. Bumped txn are generated once a height-based timer is reached.
2425                 {
2426                         let mut node_txn = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap();
2427                         node_txn.retain(|tx| tx.input[0].previous_output.txid != node2_commitment_txid);
2432                 }
2433
2434                 let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT);
2435
2436                 // Claim the payment on nodes[4], giving it knowledge of the preimage
2437                 claim_funds!(nodes[4], nodes[3], payment_preimage_2, payment_hash_2);
2438
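                      // Connect blocks until the HTLC is close enough to expiry (within
                      // CLTV_CLAIM_BUFFER) that nodes[4] goes on-chain to claim it with the
                      // preimage it now holds.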
2439                 connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
2440                 let events = nodes[4].node.get_and_clear_pending_msg_events();
2441                 assert_eq!(events.len(), 2);
2442                 let close_chan_update_2 = match events[1] {
2443                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2444                                 msg.clone()
2445                         },
2446                         _ => panic!("Unexpected event"),
2447                 };
2448                 match events[0] {
2449                         MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
2450                                 assert_eq!(node_id, nodes[3].node.get_our_node_id());
2451                         },
2452                         _ => panic!("Unexpected event"),
2453                 }
2454                 check_added_monitors!(nodes[4], 1);
2455                 test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
2456                 check_closed_event!(nodes[4], 1, ClosureReason::HTLCsTimedOut, [nodes[3].node.get_our_node_id()], 100000);
2457
2458                 mine_transaction(&nodes[4], &node_txn[0]);
2459                 check_preimage_claim(&nodes[4], &node_txn);
2460                 (close_chan_update_1, close_chan_update_2)
2461         };
2462         nodes[3].gossip_sync.handle_channel_update(&close_chan_update_2).unwrap();
2463         nodes[4].gossip_sync.handle_channel_update(&close_chan_update_1).unwrap();
2464         assert_eq!(nodes[3].node.list_channels().len(), 0);
2465         assert_eq!(nodes[4].node.list_channels().len(), 0);
2466
2467         assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
2468                 Ok(ChannelMonitorUpdateStatus::Completed));
2469         check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [nodes[4].node.get_our_node_id()], 100000);
2470 }
2471
2472 #[test]
2473 fn test_justice_tx_htlc_timeout() {
2474         // Test justice txn built on revoked HTLC-Timeout tx, against both sides
2475         let mut alice_config = test_default_channel_config();
2476         alice_config.channel_handshake_config.announced_channel = true;
2477         alice_config.channel_handshake_limits.force_announced_channel_preference = false;
2478         alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
2479         let mut bob_config = test_default_channel_config();
2480         bob_config.channel_handshake_config.announced_channel = true;
2481         bob_config.channel_handshake_limits.force_announced_channel_preference = false;
2482         bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
2483         let user_cfgs = [Some(alice_config), Some(bob_config)];
2484         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2485         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2486         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
2487         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2488         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
2489         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2490         // Create some new channels:
2491         let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1);
2492
2493         // A pending HTLC which will be revoked:
2494         let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2495         // Get the will-be-revoked local txn from nodes[0]
2496         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_5.2);
2497         assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
2498         assert_eq!(revoked_local_txn[0].input.len(), 1);
2499         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
2500         assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to 0 are present
2501         assert_eq!(revoked_local_txn[1].input.len(), 1);
2502         assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2503         assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
2504         // Revoke the old state
2505         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
2506
2507         {
2508                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2509                 {
2510                         let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
2511                         assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2512                         assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output
2513                         check_spends!(node_txn[0], revoked_local_txn[0]);
2514                         node_txn.swap_remove(0);
2515                 }
2516                 check_added_monitors!(nodes[1], 1);
2517                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2518                 test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
2519
2520                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2521                 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
2522                 // Verify broadcast of revoked HTLC-timeout
2523                 let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
2524                 check_added_monitors!(nodes[0], 1);
2525                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2526                 // Broadcast revoked HTLC-timeout on node 1
2527                 mine_transaction(&nodes[1], &node_txn[1]);
2528                 test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
2529         }
2530         get_announce_close_broadcast_events(&nodes, 0, 1);
2531         assert_eq!(nodes[0].node.list_channels().len(), 0);
2532         assert_eq!(nodes[1].node.list_channels().len(), 0);
2533 }
2534
2535 #[test]
2536 fn test_justice_tx_htlc_success() {
2537         // Test justice txn built on revoked HTLC-Success tx, against both sides
2538         let mut alice_config = test_default_channel_config();
2539         alice_config.channel_handshake_config.announced_channel = true;
2540         alice_config.channel_handshake_limits.force_announced_channel_preference = false;
2541         alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
2542         let mut bob_config = test_default_channel_config();
2543         bob_config.channel_handshake_config.announced_channel = true;
2544         bob_config.channel_handshake_limits.force_announced_channel_preference = false;
2545         bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
2546         let user_cfgs = [Some(alice_config), Some(bob_config)];
2547         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2548         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2549         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
2550         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2551         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
2552         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2553         // Create some new channels:
2554         let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1);
2555
2556         // A pending HTLC which will be revoked:
2557         let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2558         // Get the will-be-revoked local txn from B
2559         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_6.2);
2560         assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx
2561         assert_eq!(revoked_local_txn[0].input.len(), 1);
2562         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.txid());
2563         assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to A are present
2564         // Revoke the old state
2565         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4);
2566         {
2567                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2568                 {
2569                         let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
2570                         assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2571                         assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output
2572
2573                         check_spends!(node_txn[0], revoked_local_txn[0]);
2574                         node_txn.swap_remove(0);
2575                 }
2576                 check_added_monitors!(nodes[0], 1);
2577                 test_txn_broadcast(&nodes[0], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
2578
2579                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2580                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2581                 let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
2582                 check_added_monitors!(nodes[1], 1);
2583                 mine_transaction(&nodes[0], &node_txn[1]);
2584                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2585                 test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
2586         }
2587         get_announce_close_broadcast_events(&nodes, 0, 1);
2588         assert_eq!(nodes[0].node.list_channels().len(), 0);
2589         assert_eq!(nodes[1].node.list_channels().len(), 0);
2590 }
2591
2592 #[test]
2593 fn revoked_output_claim() {
2594         // Simple test to ensure a node will claim a revoked output when a stale remote commitment
2595         // transaction is broadcast by its counterparty
2596         let chanmon_cfgs = create_chanmon_cfgs(2);
2597         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2598         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2599         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2600         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2601         // nodes[0] is going to revoke an old state, so nodes[1] should be able to claim the revoked output
2602         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2603         assert_eq!(revoked_local_txn.len(), 1);
2604         // Only output is the full channel value back to nodes[0]:
2605         assert_eq!(revoked_local_txn[0].output.len(), 1);
2606         // Send a payment through, updating everyone's latest commitment txn
2607         send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);
2608
2609         // Inform nodes[1] that nodes[0] broadcast a stale tx
2610         mine_transaction(&nodes[1], &revoked_local_txn[0]);
2611         check_added_monitors!(nodes[1], 1);
2612         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2613         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
2614         assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output
2615
2616         check_spends!(node_txn[0], revoked_local_txn[0]);
2617
2618         // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
2619         mine_transaction(&nodes[0], &revoked_local_txn[0]);
2620         get_announce_close_broadcast_events(&nodes, 0, 1);
2621         check_added_monitors!(nodes[0], 1);
2622         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2623 }
2624
2625 #[test]
2626 fn test_forming_justice_tx_from_monitor_updates() {
2627         do_test_forming_justice_tx_from_monitor_updates(true);
2628         do_test_forming_justice_tx_from_monitor_updates(false);
2629 }
2630
2631 fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment: bool) {
2632         // Simple test to make sure that the justice tx built by the WatchtowerPersister
2633         // is well-formed and can be broadcast/confirmed successfully in the event
2634         // that a revoked commitment transaction is broadcast
2635         // (similar to the `revoked_output_claim` test, but we fetch the justice tx and broadcast it manually)
2636         let chanmon_cfgs = create_chanmon_cfgs(2);
2637         let destination_script0 = chanmon_cfgs[0].keys_manager.get_destination_script([0; 32]).unwrap();
2638         let destination_script1 = chanmon_cfgs[1].keys_manager.get_destination_script([0; 32]).unwrap();
2639         let persisters = vec![WatchtowerPersister::new(destination_script0),
2640                 WatchtowerPersister::new(destination_script1)];
2641         let node_cfgs = create_node_cfgs_with_persisters(2, &chanmon_cfgs, persisters.iter().collect());
2642         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2643         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2644         let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
2645         let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
2646
2647         if !broadcast_initial_commitment {
2648                 // Send a payment to move the channel forward
2649                 send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000);
2650         }
2651
2652         // nodes[0] is going to revoke an old state, so nodes[1] should be able to claim the revoked output.
2653         // We'll keep this commitment transaction to broadcast once it's revoked.
2654         let revoked_local_txn = get_local_commitment_txn!(nodes[0], channel_id);
2655         assert_eq!(revoked_local_txn.len(), 1);
2656         let revoked_commitment_tx = &revoked_local_txn[0];
2657
2658         // Send another payment, now revoking the previous commitment tx
2659         send_payment(&nodes[0], &[&nodes[1]], 5_000_000);
2660
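             // The WatchtowerPersister saw the revocation via monitor updates and pre-built a signed justice
             // tx, so we can fetch it here without the revoked commitment ever appearing on-chain.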
2661         let justice_tx = persisters[1].justice_tx(funding_txo, &revoked_commitment_tx.txid()).unwrap();
2662         check_spends!(justice_tx, revoked_commitment_tx);
2663
2664         mine_transactions(&nodes[1], &[revoked_commitment_tx, &justice_tx]);
2665         mine_transactions(&nodes[0], &[revoked_commitment_tx, &justice_tx]);
2666
2667         check_added_monitors!(nodes[1], 1);
2668         check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
2669                 &[nodes[0].node.get_our_node_id()], 100_000);
2670         get_announce_close_broadcast_events(&nodes, 1, 0);
2671
2672         check_added_monitors!(nodes[0], 1);
2673         check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false,
2674                 &[nodes[1].node.get_our_node_id()], 100_000);
2675
2676         // Check that the justice tx has sent the revoked output value to nodes[1]
2677         let monitor = get_monitor!(nodes[1], channel_id);
2678         let total_claimable_balance = monitor.get_claimable_balances().iter().fold(0, |sum, balance| {
2679                 match balance {
2680                         channelmonitor::Balance::ClaimableAwaitingConfirmations { amount_satoshis, .. } => sum + amount_satoshis,
2681                         _ => panic!("Unexpected balance type"),
2682                 }
2683         });
2684         // On the first commitment, nodes[1]'s balance was below dust so it didn't have an output
2685         let node1_channel_balance = if broadcast_initial_commitment { 0 } else { revoked_commitment_tx.output[0].value.to_sat() };
2686         let expected_claimable_balance = node1_channel_balance + justice_tx.output[0].value.to_sat();
2687         assert_eq!(total_claimable_balance, expected_claimable_balance);
2688 }
2689
2691 #[test]
2692 fn claim_htlc_outputs_shared_tx() {
2693         // Node revoked its old state; the HTLCs haven't timed out yet, so claim them in a shared justice tx
2694         let mut chanmon_cfgs = create_chanmon_cfgs(2);
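             // The test signer would normally refuse to sign for a revoked state; disable that policy check
             // since this test deliberately broadcasts nodes[0]'s stale commitment.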
2695         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2696         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2697         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2698         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2699
2700         // Create some new channel:
2701         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2702
2703         // Rebalance the network to generate an HTLC in each direction
2704         send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
2705         // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both the offered and received HTLC outputs on top of the commitment tx
2706         let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
2707         let (_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
2708
2709         // Get the will-be-revoked local txn from node[0]
2710         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2711         assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
2712         assert_eq!(revoked_local_txn[0].input.len(), 1);
2713         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
2714         assert_eq!(revoked_local_txn[1].input.len(), 1);
2715         assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2716         assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
2717         check_spends!(revoked_local_txn[1], revoked_local_txn[0]);
2718
2719         // Revoke the old state
2720         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
2721
2722         {
2723                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2724                 check_added_monitors!(nodes[0], 1);
2725                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2726                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2727                 check_added_monitors!(nodes[1], 1);
2728                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2729                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2730                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
2731
2732                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
2733                 assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2734
2735                 assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
2736                 check_spends!(node_txn[0], revoked_local_txn[0]);
2737
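                     // The last witness element of each input is the witness script; its length identifies
                     // which type of revoked output is being claimed (to_local vs offered vs received HTLC).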
2738                 let mut witness_lens = BTreeSet::new();
2739                 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
2740                 witness_lens.insert(node_txn[0].input[1].witness.last().unwrap().len());
2741                 witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len());
2742                 assert_eq!(witness_lens.len(), 3);
2743                 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2744                 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2745                 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2746
2747                 // Finally, mine the penalty transaction and check that we get an HTLC failure after
2748                 // ANTI_REORG_DELAY confirmations.
2749                 mine_transaction(&nodes[1], &node_txn[0]);
2750                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2751                 expect_payment_failed!(nodes[1], payment_hash_2, false);
2752         }
2753         get_announce_close_broadcast_events(&nodes, 0, 1);
2754         assert_eq!(nodes[0].node.list_channels().len(), 0);
2755         assert_eq!(nodes[1].node.list_channels().len(), 0);
2756 }
2757
2758 #[test]
2759 fn claim_htlc_outputs_single_tx() {
2760         // Node revoked its old state; the HTLCs have timed out, so claim each of them in a separate justice tx
2761         let mut chanmon_cfgs = create_chanmon_cfgs(2);
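             // As above in `claim_htlc_outputs_shared_tx`, disable the test signer's revocation policy check
             // so nodes[0] can sign for the stale state we broadcast below.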
2762         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2763         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2764         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2765         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2766
2767         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2768
2769         // Rebalance the network to generate an HTLC in each direction
2770         send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
2771         // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both the offered and received HTLC outputs on top of the commitment tx, but this
2772         // time as separate claim transactions, as we're going to time out the HTLCs given a high current height
2773         let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
2774         let (_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
2775
2776         // Get the will-be-revoked local txn from node[0]
2777         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2778
2779         // Revoke the old state
2780         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
2781
2782         {
2783                 confirm_transaction_at(&nodes[0], &revoked_local_txn[0], 100);
2784                 check_added_monitors!(nodes[0], 1);
2785                 confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
2786                 check_added_monitors!(nodes[1], 1);
2787                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2788                 let mut events = nodes[0].node.get_and_clear_pending_events();
2789                 expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
2790                 match events.last().unwrap() {
2791                         Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
2792                         _ => panic!("Unexpected event"),
2793                 }
2794
2795                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2796                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
2797
2798                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcast();
2799
2800                 // Check the pair local commitment and HTLC-timeout broadcast due to HTLC expiration
2801                 assert_eq!(node_txn[0].input.len(), 1);
2802                 check_spends!(node_txn[0], chan_1.3);
2803                 assert_eq!(node_txn[1].input.len(), 1);
2804                 let witness_script = node_txn[1].input[0].witness.last().unwrap();
2805                 assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); // Spending an offered HTLC output
2806                 check_spends!(node_txn[1], node_txn[0]);
2807
2808                 // Filter out any non-justice transactions.
2809                 node_txn.retain(|tx| tx.input[0].previous_output.txid == revoked_local_txn[0].txid());
2810                 assert!(node_txn.len() > 3);
2811
2812                 assert_eq!(node_txn[0].input.len(), 1);
2813                 assert_eq!(node_txn[1].input.len(), 1);
2814                 assert_eq!(node_txn[2].input.len(), 1);
2815
2816                 check_spends!(node_txn[0], revoked_local_txn[0]);
2817                 check_spends!(node_txn[1], revoked_local_txn[0]);
2818                 check_spends!(node_txn[2], revoked_local_txn[0]);
2819
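                     // As in `claim_htlc_outputs_shared_tx`, the witness script lengths distinguish the three
                     // kinds of revoked outputs being claimed.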
2820                 let mut witness_lens = BTreeSet::new();
2821                 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
2822                 witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len());
2823                 witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
2824                 assert_eq!(witness_lens.len(), 3);
2825                 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2826                 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2827                 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2828
2829                 // Finally, mine the penalty transactions and check that we get an HTLC failure after
2830                 // ANTI_REORG_DELAY confirmations.
2831                 mine_transaction(&nodes[1], &node_txn[0]);
2832                 mine_transaction(&nodes[1], &node_txn[1]);
2833                 mine_transaction(&nodes[1], &node_txn[2]);
2834                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2835                 expect_payment_failed!(nodes[1], payment_hash_2, false);
2836         }
2837         get_announce_close_broadcast_events(&nodes, 0, 1);
2838         assert_eq!(nodes[0].node.list_channels().len(), 0);
2839         assert_eq!(nodes[1].node.list_channels().len(), 0);
2840 }
2841
2842 #[test]
2843 fn test_htlc_on_chain_success() {
2844         // Test that in case of a unilateral close onchain, we detect the state of the output and pass
2845         // the preimage backward accordingly. So here we test that ChannelManager is
2846         // broadcasting the right event to the other nodes in the payment path.
2847         // We test with two HTLCs simultaneously as that was not handled correctly in the past.
2848         // A --------------------> B ----------------------> C (preimage)
2849         // First, C should claim the HTLC outputs via HTLC-Success when its own latest local
2850         // commitment transaction is broadcast.
2851         // Then, B should learn the preimage from said transactions, attempting to claim backwards
2852         // towards A.
2853         // B should be able to claim via the preimage if A then broadcasts its local tx.
2854         // Finally, when A sees B's latest local commitment transaction it should be able to claim
2855         // the HTLC outputs via the preimage it learned (which, once confirmed, should generate a
2856         // PaymentSent event).
2857
2858         let chanmon_cfgs = create_chanmon_cfgs(3);
2859         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2860         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2861         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2862
2863         // Create some initial channels
2864         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2865         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
2866
2867         // Ensure all nodes are at the same height
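             // (the block-connection counts and HTLC expiry heights below assume a common tip)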
2868         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
2869         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
2870         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
2871         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
2872
2873         // Rebalance the network a bit by relaying one payment through all the channels...
2874         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000);
2875         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000);
2876
2877         let (our_payment_preimage, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
2878         let (our_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
2879
2880         // Broadcast legit commitment tx from C on B's chain
2881         // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain
2882         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
2883         assert_eq!(commitment_tx.len(), 1);
2884         check_spends!(commitment_tx[0], chan_2.3);
2885         nodes[2].node.claim_funds(our_payment_preimage);
2886         expect_payment_claimed!(nodes[2], payment_hash_1, 3_000_000);
2887         nodes[2].node.claim_funds(our_payment_preimage_2);
2888         expect_payment_claimed!(nodes[2], payment_hash_2, 3_000_000);
2889         check_added_monitors!(nodes[2], 2);
2890         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2891         assert!(updates.update_add_htlcs.is_empty());
2892         assert!(updates.update_fail_htlcs.is_empty());
2893         assert!(updates.update_fail_malformed_htlcs.is_empty());
2894         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2895
2896         mine_transaction(&nodes[2], &commitment_tx[0]);
2897         check_closed_broadcast!(nodes[2], true);
2898         check_added_monitors!(nodes[2], 1);
2899         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2900         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 2 (2 * HTLC-Success tx)
2901         assert_eq!(node_txn.len(), 2);
2902         check_spends!(node_txn[0], commitment_tx[0]);
2903         check_spends!(node_txn[1], commitment_tx[0]);
2904         assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2905         assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2906         assert!(node_txn[0].output[0].script_pubkey.is_p2wsh()); // revokeable output
2907         assert!(node_txn[1].output[0].script_pubkey.is_p2wsh()); // revokeable output
2908         assert_eq!(node_txn[0].lock_time, LockTime::ZERO);
2909         assert_eq!(node_txn[1].lock_time, LockTime::ZERO);
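             // HTLC-Success transactions carry no locktime, unlike HTLC-Timeout transactions, which are
             // locked to the HTLC's CLTV expiry (see check_tx_local_broadcast! below).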
2910
2911         // Verify that B's ChannelManager is able to extract the preimage from the HTLC-Success tx and pass it backward
2912         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()]));
2913         connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
2914         {
2915                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2916                 assert_eq!(added_monitors.len(), 1);
2917                 assert_eq!(added_monitors[0].0.txid, chan_2.3.txid());
2918                 added_monitors.clear();
2919         }
2920         let forwarded_events = nodes[1].node.get_and_clear_pending_events();
2921         assert_eq!(forwarded_events.len(), 3);
2922         match forwarded_events[0] {
2923                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
2924                 _ => panic!("Unexpected event"),
2925         }
2926         let chan_id = Some(chan_1.2);
2927         match forwarded_events[1] {
2928                 Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
2929                         next_channel_id, outbound_amount_forwarded_msat, ..
2930                 } => {
2931                         assert_eq!(total_fee_earned_msat, Some(1000));
2932                         assert_eq!(prev_channel_id, chan_id);
2933                         assert_eq!(claim_from_onchain_tx, true);
2934                         assert_eq!(next_channel_id, Some(chan_2.2));
2935                         assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
2936                 },
2937                 _ => panic!()
2938         }
2939         match forwarded_events[2] {
2940                 Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
2941                         next_channel_id, outbound_amount_forwarded_msat, ..
2942                 } => {
2943                         assert_eq!(total_fee_earned_msat, Some(1000));
2944                         assert_eq!(prev_channel_id, chan_id);
2945                         assert_eq!(claim_from_onchain_tx, true);
2946                         assert_eq!(next_channel_id, Some(chan_2.2));
2947                         assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
2948                 },
2949                 _ => panic!()
2950         }
2951         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
2952         {
2953                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2954                 assert_eq!(added_monitors.len(), 2);
2955                 assert_eq!(added_monitors[0].0.txid, chan_1.3.txid());
2956                 assert_eq!(added_monitors[1].0.txid, chan_1.3.txid());
2957                 added_monitors.clear();
2958         }
2959         assert_eq!(events.len(), 3);
2960
2961         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
2962         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
2963
2964         match nodes_2_event {
2965                 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {},
2966                 _ => panic!("Unexpected event"),
2967         }
2968
2969         match nodes_0_event {
2970                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
2971                         assert!(update_add_htlcs.is_empty());
2972                         assert!(update_fail_htlcs.is_empty());
2973                         assert_eq!(update_fulfill_htlcs.len(), 1);
2974                         assert!(update_fail_malformed_htlcs.is_empty());
2975                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
2976                 },
2977                 _ => panic!("Unexpected event"),
2978         };
2979
2980         // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
2981         match events[0] {
2982                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
2983                 _ => panic!("Unexpected event"),
2984         }
2985
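             // Checks the broadcaster's two HTLC-timeout claims against the given commitment tx: claims on
             // our own commitment use pre-signed second-stage transactions paying to a revokeable P2WSH
             // output, while claims on a remote commitment pay directly to our P2WPKH.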
2986         macro_rules! check_tx_local_broadcast {
2987                 ($node: expr, $htlc_offered: expr, $commitment_tx: expr) => { {
2988                         let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap();
2989                         assert_eq!(node_txn.len(), 2);
2990                         // Node[1]: 2 * HTLC-timeout tx
2991                         // Node[0]: 2 * HTLC-timeout tx
2992                         check_spends!(node_txn[0], $commitment_tx);
2993                         check_spends!(node_txn[1], $commitment_tx);
2994                         assert_ne!(node_txn[0].lock_time, LockTime::ZERO);
2995                         assert_ne!(node_txn[1].lock_time, LockTime::ZERO);
2996                         if $htlc_offered {
2997                                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2998                                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2999                                 assert!(node_txn[0].output[0].script_pubkey.is_p2wsh()); // revokeable output
3000                                 assert!(node_txn[1].output[0].script_pubkey.is_p2wsh()); // revokeable output
3001                         } else {
3002                                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3003                                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3004                                 assert!(node_txn[0].output[0].script_pubkey.is_p2wpkh()); // direct payment
3005                                 assert!(node_txn[1].output[0].script_pubkey.is_p2wpkh()); // direct payment
3006                         }
3007                         node_txn.clear();
3008                 } }
3009         }
3010         // nodes[1] now broadcasts its own timeout-claim of the output that nodes[2] just claimed via success.
3011         check_tx_local_broadcast!(nodes[1], false, commitment_tx[0]);
3012
3013         // Broadcast legit commitment tx from A on B's chain
3014         // Broadcast preimage tx by B on the offered output from A's commitment tx on A's chain
3015         let node_a_commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
3016         check_spends!(node_a_commitment_tx[0], chan_1.3);
3017         mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
3018         check_closed_broadcast!(nodes[1], true);
3019         check_added_monitors!(nodes[1], 1);
3020         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
3021         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
3022         assert!(node_txn.len() == 1 || node_txn.len() == 3); // HTLC-Success, plus possibly 2 RBF bumps of the earlier HTLC claims
3023         let commitment_spend =
3024                 if node_txn.len() == 1 {
3025                         &node_txn[0]
3026                 } else {
3027                         // Certain `ConnectStyle`s (e.g. FullBlockViaListen) will cause RBF bumps of the
3028                         // previous HTLC transaction to be broadcast.
3029                         if node_txn[0].input[0].previous_output.txid == node_a_commitment_tx[0].txid() {
3030                                 check_spends!(node_txn[1], commitment_tx[0]);
3031                                 check_spends!(node_txn[2], commitment_tx[0]);
3032                                 assert_ne!(node_txn[1].input[0].previous_output.vout, node_txn[2].input[0].previous_output.vout);
3033                                 &node_txn[0]
3034                         } else {
3035                                 check_spends!(node_txn[0], commitment_tx[0]);
3036                                 check_spends!(node_txn[1], commitment_tx[0]);
3037                                 assert_ne!(node_txn[0].input[0].previous_output.vout, node_txn[1].input[0].previous_output.vout);
3038                                 &node_txn[2]
3039                         }
3040                 };
3041
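             // The preimage claim aggregates both offered-HTLC outputs of A's commitment tx into a single spend.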
3042         check_spends!(commitment_spend, node_a_commitment_tx[0]);
3043         assert_eq!(commitment_spend.input.len(), 2);
3044         assert_eq!(commitment_spend.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
3045         assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
3046         assert_eq!(commitment_spend.lock_time.to_consensus_u32(), nodes[1].best_block_info().1);
3047         assert!(commitment_spend.output[0].script_pubkey.is_p2wpkh()); // direct payment
3048         // We don't bother to check that B can claim the HTLC output on its commitment tx here as
3049         // we already checked the same situation with A.
3050
3051         // Verify that A's ChannelManager is able to extract the preimage from the preimage tx and generate PaymentSent
3052         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()]));
3053         connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
3054         check_closed_broadcast!(nodes[0], true);
3055         check_added_monitors!(nodes[0], 1);
3056         let events = nodes[0].node.get_and_clear_pending_events();
3057         assert_eq!(events.len(), 5);
3058         let mut first_claimed = false;
3059         for event in events {
3060                 match event {
3061                         Event::PaymentSent { payment_preimage, payment_hash, .. } => {
3062                                 if payment_preimage == our_payment_preimage && payment_hash == payment_hash_1 {
3063                                         assert!(!first_claimed);
3064                                         first_claimed = true;
3065                                 } else {
3066                                         assert_eq!(payment_preimage, our_payment_preimage_2);
3067                                         assert_eq!(payment_hash, payment_hash_2);
3068                                 }
3069                         },
3070                         Event::PaymentPathSuccessful { .. } => {},
3071                         Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
3072                         _ => panic!("Unexpected event"),
3073                 }
3074         }
3075         check_tx_local_broadcast!(nodes[0], true, node_a_commitment_tx[0]);
3076 }
3077
3078 fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
3079         // Test that in case of a unilateral close onchain, we detect the state of the output and
3080         // time out the HTLC backward accordingly. So here we test that ChannelManager is
3081         // broadcasting the right event to the other nodes in the payment path.
3082         // A ------------------> B ----------------------> C (timeout)
3083         //    B's commitment tx                 C's commitment tx
3084         //            \                                  \
3085         //         B's HTLC timeout tx               B's timeout tx
3086
3087         let chanmon_cfgs = create_chanmon_cfgs(3);
3088         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3089         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3090         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3091         *nodes[0].connect_style.borrow_mut() = connect_style;
3092         *nodes[1].connect_style.borrow_mut() = connect_style;
3093         *nodes[2].connect_style.borrow_mut() = connect_style;
3094
3095         // Create some initial channels
3096         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
3097         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3098
3099         // Rebalance the network a bit by relaying one payment through all the channels...
3100         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000);
3101         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000);
3102
3103         let (_payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3104
3105         // Broadcast legit commitment tx from C on B's chain
3106         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
3107         check_spends!(commitment_tx[0], chan_2.3);
3108         nodes[2].node.fail_htlc_backwards(&payment_hash);
3109         check_added_monitors!(nodes[2], 0);
3110         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
3111         check_added_monitors!(nodes[2], 1);
3112
3113         let events = nodes[2].node.get_and_clear_pending_msg_events();
3114         assert_eq!(events.len(), 1);
3115         match events[0] {
3116                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3117                         assert!(update_add_htlcs.is_empty());
3118                         assert!(!update_fail_htlcs.is_empty());
3119                         assert!(update_fulfill_htlcs.is_empty());
3120                         assert!(update_fail_malformed_htlcs.is_empty());
3121                         assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
3122                 },
3123                 _ => panic!("Unexpected event"),
3124         };
3125         mine_transaction(&nodes[2], &commitment_tx[0]);
3126         check_closed_broadcast!(nodes[2], true);
3127         check_added_monitors!(nodes[2], 1);
3128         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
3129         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
3130         assert_eq!(node_txn.len(), 0);
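             // C can only claim the received HTLC with the preimage, which it doesn't have since it failed
             // the payment backwards, so it has nothing to broadcast here.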
3131
3132         // Broadcast timeout transaction by B on the received output from C's commitment tx on B's chain
3133         // Verify that B's ChannelManager is able to detect that the HTLC has timed out via its own tx and fail it backward accordingly
3134         mine_transaction(&nodes[1], &commitment_tx[0]);
3135         check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
3136                 [nodes[2].node.get_our_node_id()], 100000);
3137         connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
3138         let timeout_tx = {
3139                 let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
3140                 if nodes[1].connect_style.borrow().skips_blocks() {
3141                         assert_eq!(txn.len(), 1);
3142                 } else {
3143                         assert_eq!(txn.len(), 3); // Two extra fee bumps for timeout transaction
3144                 }
3145                 txn.iter().for_each(|tx| check_spends!(tx, commitment_tx[0]));
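                     // The HTLC B offered appears as a received HTLC on C's commitment tx, hence the
                     // accepted-HTLC witness script on B's timeout claim.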
3146                 assert_eq!(txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3147                 txn.remove(0)
3148         };
3149
3150         mine_transaction(&nodes[1], &timeout_tx);
3151         check_added_monitors!(nodes[1], 1);
3152         check_closed_broadcast!(nodes[1], true);
3153
3154         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3155
3156         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
3157         check_added_monitors!(nodes[1], 1);
3158         let events = nodes[1].node.get_and_clear_pending_msg_events();
3159         assert_eq!(events.len(), 1);
3160         match events[0] {
3161                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3162                         assert!(update_add_htlcs.is_empty());
3163                         assert!(!update_fail_htlcs.is_empty());
3164                         assert!(update_fulfill_htlcs.is_empty());
3165                         assert!(update_fail_malformed_htlcs.is_empty());
3166                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3167                 },
3168                 _ => panic!("Unexpected event"),
3169         };
3170
3171         // Broadcast legit commitment tx from B on A's chain
3172         let commitment_tx = get_local_commitment_txn!(nodes[1], chan_1.2);
3173         check_spends!(commitment_tx[0], chan_1.3);
3174
3175         mine_transaction(&nodes[0], &commitment_tx[0]);
3176         connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
3177
3178         check_closed_broadcast!(nodes[0], true);
3179         check_added_monitors!(nodes[0], 1);
3180         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
3181         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx
3182         assert_eq!(node_txn.len(), 1);
3183         check_spends!(node_txn[0], commitment_tx[0]);
3184         assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3185 }
3186
3187 #[test]
3188 fn test_htlc_on_chain_timeout() {
3189         do_test_htlc_on_chain_timeout(ConnectStyle::BestBlockFirstSkippingBlocks);
3190         do_test_htlc_on_chain_timeout(ConnectStyle::TransactionsFirstSkippingBlocks);
3191         do_test_htlc_on_chain_timeout(ConnectStyle::FullBlockViaListen);
3192 }
3193
3194 #[test]
3195 fn test_simple_commitment_revoked_fail_backward() {
3196         // Test that in case of a revoked commitment tx, we detect the resolution of the output by the justice tx
3197         // and fail backward accordingly.
3198
3199         let chanmon_cfgs = create_chanmon_cfgs(3);
3200         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3201         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3202         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3203
3204         // Create some initial channels
3205         create_announced_chan_between_nodes(&nodes, 0, 1);
3206         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3207
3208         let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3209         // Get the will-be-revoked local txn from nodes[2]
3210         let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3211         // Revoke the old state
3212         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3213
3214         let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3215
3216         mine_transaction(&nodes[1], &revoked_local_txn[0]);
3217         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
3218         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3219         check_added_monitors!(nodes[1], 1);
3220         check_closed_broadcast!(nodes[1], true);
3221
3222         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
3223         check_added_monitors!(nodes[1], 1);
3224         let events = nodes[1].node.get_and_clear_pending_msg_events();
3225         assert_eq!(events.len(), 1);
3226         match events[0] {
3227                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3228                         assert!(update_add_htlcs.is_empty());
3229                         assert_eq!(update_fail_htlcs.len(), 1);
3230                         assert!(update_fulfill_htlcs.is_empty());
3231                         assert!(update_fail_malformed_htlcs.is_empty());
3232                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3233
3234                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3235                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3236                         expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_2.0.contents.short_channel_id, true);
3237                 },
3238                 _ => panic!("Unexpected event"),
3239         }
3240 }
3241
3242 fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use_dust: bool, no_to_remote: bool) {
3243         // Test that if our counterparty broadcasts a revoked commitment transaction we fail all
3244         // pending HTLCs on that channel backwards even if the HTLCs aren't present in our latest
3245         // commitment transaction anymore.
3246         // To do this, we have the peer which will broadcast a revoked commitment transaction send
3247         // a number of update_fail/commitment_signed updates without ever sending the RAA in
3248         // response to our commitment_signed. This is somewhat misbehavior-y, though not
3249         // technically disallowed and we should probably handle it reasonably.
3250         // Note that this is pretty exhaustive as an outbound HTLC which we haven't yet
3251         // failed/fulfilled backwards must be in at least one of the latest two remote commitment
3252         // transactions:
3253         // * Once we move it out of our holding cell/add it, we will immediately include it in a
3254         //   commitment_signed (implying it will be in the latest remote commitment transaction).
3255         // * Once they remove it, we will send a (the first) commitment_signed without the HTLC,
3256         //   and once they revoke the previous commitment transaction (allowing us to send a new
3257         //   commitment_signed) we will be free to fail/fulfill the HTLC backwards.
3258         let chanmon_cfgs = create_chanmon_cfgs(3);
3259         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3260         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3261         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3262
3263         // Create some initial channels
3264         create_announced_chan_between_nodes(&nodes, 0, 1);
3265         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3266
3267         let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
3268         // Get the will-be-revoked local txn from nodes[2]
3269         let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3270         assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 });
3271         // Revoke the old state
3272         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3273
3274         let value = if use_dust {
3275                 // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
3276                 // well, so HTLCs at exactly the dust limit will not be included in commitment txn.
3277                 nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id())
3278                         .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context().holder_dust_limit_satoshis * 1000
3279         } else { 3000000 };
3280
3281         let (_, first_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3282         let (_, second_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3283         let (_, third_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3284
3285         nodes[2].node.fail_htlc_backwards(&first_payment_hash);
3286         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
3287         check_added_monitors!(nodes[2], 1);
3288         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3289         assert!(updates.update_add_htlcs.is_empty());
3290         assert!(updates.update_fulfill_htlcs.is_empty());
3291         assert!(updates.update_fail_malformed_htlcs.is_empty());
3292         assert_eq!(updates.update_fail_htlcs.len(), 1);
3293         assert!(updates.update_fee.is_none());
3294         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3295         let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
3296         // Drop the last RAA from 3 -> 2
3297
3298         nodes[2].node.fail_htlc_backwards(&second_payment_hash);
3299         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: second_payment_hash }]);
3300         check_added_monitors!(nodes[2], 1);
3301         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3302         assert!(updates.update_add_htlcs.is_empty());
3303         assert!(updates.update_fulfill_htlcs.is_empty());
3304         assert!(updates.update_fail_malformed_htlcs.is_empty());
3305         assert_eq!(updates.update_fail_htlcs.len(), 1);
3306         assert!(updates.update_fee.is_none());
3307         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3308         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3309         check_added_monitors!(nodes[1], 1);
3310         // Note that nodes[1] is in AwaitingRAA, so won't send a CS
3311         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3312         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3313         check_added_monitors!(nodes[2], 1);
3314
3315         nodes[2].node.fail_htlc_backwards(&third_payment_hash);
3316         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: third_payment_hash }]);
3317         check_added_monitors!(nodes[2], 1);
3318         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3319         assert!(updates.update_add_htlcs.is_empty());
3320         assert!(updates.update_fulfill_htlcs.is_empty());
3321         assert!(updates.update_fail_malformed_htlcs.is_empty());
3322         assert_eq!(updates.update_fail_htlcs.len(), 1);
3323         assert!(updates.update_fee.is_none());
3324         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3325         // At this point first_payment_hash has dropped out of the latest two commitment
3326         // transactions that nodes[1] is tracking...
3327         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3328         check_added_monitors!(nodes[1], 1);
3329         // Note that nodes[1] is (still) in AwaitingRAA, so won't send a CS
3330         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3331         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3332         check_added_monitors!(nodes[2], 1);
3333
3334         // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting
3335         // on nodes[2]'s RAA.
3336         let (route, fourth_payment_hash, _, fourth_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 1000000);
3337         nodes[1].node.send_payment_with_route(&route, fourth_payment_hash,
3338                 RecipientOnionFields::secret_only(fourth_payment_secret), PaymentId(fourth_payment_hash.0)).unwrap();
3339         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3340         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3341         check_added_monitors!(nodes[1], 0);
3342
3343         if deliver_bs_raa {
3344                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_raa);
3345                 // One monitor for the new revocation preimage, no second one as we won't generate a new
3346                 // commitment transaction for nodes[0] until process_pending_htlc_forwards().
3347                 check_added_monitors!(nodes[1], 1);
3348                 let events = nodes[1].node.get_and_clear_pending_events();
3349                 assert_eq!(events.len(), 2);
3350                 match events[0] {
3351                         Event::HTLCHandlingFailed { .. } => { },
3352                         _ => panic!("Unexpected event"),
3353                 }
3354                 match events[1] {
3355                         Event::PendingHTLCsForwardable { .. } => { },
3356                         _ => panic!("Unexpected event"),
3357                 };
3358                 // Deliberately don't process the pending fail-back so they all fail back at once after
3359                 // block connection just like the !deliver_bs_raa case
3360         }
3361
3362         let mut failed_htlcs = new_hash_set();
3363         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3364
3365         mine_transaction(&nodes[1], &revoked_local_txn[0]);
3366         check_added_monitors!(nodes[1], 1);
3367         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3368
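             // Expect the channel close plus failure events for the fourth (holding-cell) HTLC; the exact
             // count depends on whether we already delivered B's RAA (and its events) above.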
3369         let events = nodes[1].node.get_and_clear_pending_events();
3370         assert_eq!(events.len(), if deliver_bs_raa { 3 + nodes.len() - 1 } else { 4 + nodes.len() });
3371         assert!(events.iter().any(|ev| matches!(
3372                 ev,
3373                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. }
3374         )));
3375         assert!(events.iter().any(|ev| matches!(
3376                 ev,
3377                 Event::PaymentPathFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash
3378         )));
3379         assert!(events.iter().any(|ev| matches!(
3380                 ev,
3381                 Event::PaymentFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash
3382         )));
3383
3384         nodes[1].node.process_pending_htlc_forwards();
3385         check_added_monitors!(nodes[1], 1);
3386
3387         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
3388         assert_eq!(events.len(), if deliver_bs_raa { 4 } else { 3 });
3389
3390         if deliver_bs_raa {
3391                 let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
3392                 match nodes_2_event {
3393                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3394                                 assert_eq!(nodes[2].node.get_our_node_id(), *node_id);
3395                                 assert_eq!(update_add_htlcs.len(), 1);
3396                                 assert!(update_fulfill_htlcs.is_empty());
3397                                 assert!(update_fail_htlcs.is_empty());
3398                                 assert!(update_fail_malformed_htlcs.is_empty());
3399                         },
3400                         _ => panic!("Unexpected event"),
3401                 }
3402         }
3403
3404         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
3405         match nodes_2_event {
3406                 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { msg: Some(msgs::ErrorMessage { channel_id, ref data }) }, node_id: _ } => {
3407                         assert_eq!(channel_id, chan_2.2);
3408                         assert_eq!(data.as_str(), "Channel closed because commitment or closing transaction was confirmed on chain.");
3409                 },
3410                 _ => panic!("Unexpected event"),
3411         }
3412
3413         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
3414         match nodes_0_event {
3415                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3416                         assert!(update_add_htlcs.is_empty());
3417                         assert_eq!(update_fail_htlcs.len(), 3);
3418                         assert!(update_fulfill_htlcs.is_empty());
3419                         assert!(update_fail_malformed_htlcs.is_empty());
3420                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3421
3422                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3423                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[1]);
3424                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[2]);
3425
3426                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3427
3428                         let events = nodes[0].node.get_and_clear_pending_events();
3429                         assert_eq!(events.len(), 6);
3430                         match events[0] {
3431                                 Event::PaymentPathFailed { ref payment_hash, ref failure, .. } => {
3432                                         assert!(failed_htlcs.insert(payment_hash.0));
3433                                         // If we delivered B's RAA we got an unknown preimage error, not something
3434                                         // that we should update our routing table for.
3435                                         if !deliver_bs_raa {
3436                                                 if let PathFailure::OnPath { network_update: Some(_) } = failure { } else { panic!("Unexpected path failure") }
3437                                         }
3438                                 },
3439                                 _ => panic!("Unexpected event"),
3440                         }
3441                         match events[1] {
3442                                 Event::PaymentFailed { ref payment_hash, .. } => {
3443                                         assert_eq!(*payment_hash, first_payment_hash);
3444                                 },
3445                                 _ => panic!("Unexpected event"),
3446                         }
3447                         match events[2] {
3448                                 Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
3449                                         assert!(failed_htlcs.insert(payment_hash.0));
3450                                 },
3451                                 _ => panic!("Unexpected event"),
3452                         }
3453                         match events[3] {
3454                                 Event::PaymentFailed { ref payment_hash, .. } => {
3455                                         assert_eq!(*payment_hash, second_payment_hash);
3456                                 },
3457                                 _ => panic!("Unexpected event"),
3458                         }
3459                         match events[4] {
3460                                 Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
3461                                         assert!(failed_htlcs.insert(payment_hash.0));
3462                                 },
3463                                 _ => panic!("Unexpected event"),
3464                         }
3465                         match events[5] {
3466                                 Event::PaymentFailed { ref payment_hash, .. } => {
3467                                         assert_eq!(*payment_hash, third_payment_hash);
3468                                 },
3469                                 _ => panic!("Unexpected event"),
3470                         }
3471                 },
3472                 _ => panic!("Unexpected event"),
3473         }
3474
3475         // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
3476         match events[0] {
3477                 MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
3478                 _ => panic!("Unexpected event"),
3479         }
3480
3481         assert!(failed_htlcs.contains(&first_payment_hash.0));
3482         assert!(failed_htlcs.contains(&second_payment_hash.0));
3483         assert!(failed_htlcs.contains(&third_payment_hash.0));
3484 }
3485
3486 #[test]
3487 fn test_commitment_revoked_fail_backward_exhaustive_a() {
3488         do_test_commitment_revoked_fail_backward_exhaustive(false, true, false);
3489         do_test_commitment_revoked_fail_backward_exhaustive(true, true, false);
3490         do_test_commitment_revoked_fail_backward_exhaustive(false, false, false);
3491         do_test_commitment_revoked_fail_backward_exhaustive(true, false, false);
3492 }
3493
3494 #[test]
3495 fn test_commitment_revoked_fail_backward_exhaustive_b() {
3496         do_test_commitment_revoked_fail_backward_exhaustive(false, true, true);
3497         do_test_commitment_revoked_fail_backward_exhaustive(true, true, true);
3498         do_test_commitment_revoked_fail_backward_exhaustive(false, false, true);
3499         do_test_commitment_revoked_fail_backward_exhaustive(true, false, true);
3500 }
3501
3502 #[test]
3503 fn fail_backward_pending_htlc_upon_channel_failure() {
3504         let chanmon_cfgs = create_chanmon_cfgs(2);
3505         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3506         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3507         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3508         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
3509
3510         // Alice -> Bob: Route a payment but without Bob sending revoke_and_ack.
3511         {
3512                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000);
3513                 nodes[0].node.send_payment_with_route(&route, payment_hash, RecipientOnionFields::secret_only(payment_secret),
3514                         PaymentId(payment_hash.0)).unwrap();
3515                 check_added_monitors!(nodes[0], 1);
3516
3517                 let payment_event = {
3518                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3519                         assert_eq!(events.len(), 1);
3520                         SendEvent::from_event(events.remove(0))
3521                 };
3522                 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
3523                 assert_eq!(payment_event.msgs.len(), 1);
3524         }
3525
3526         // Alice -> Bob: Route another payment but now Alice waits for Bob's earlier revoke_and_ack.
3527         let (route, failed_payment_hash, _, failed_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000);
3528         {
3529                 nodes[0].node.send_payment_with_route(&route, failed_payment_hash,
3530                         RecipientOnionFields::secret_only(failed_payment_secret), PaymentId(failed_payment_hash.0)).unwrap();
3531                 check_added_monitors!(nodes[0], 0);
3532
3533                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3534         }
3535
3536         // Alice <- Bob: Send a malformed update_add_htlc so Alice fails the channel.
3537         {
3538                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 50_000);
3539
3540                 let secp_ctx = Secp256k1::new();
3541                 let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
3542                 let current_height = nodes[1].node.best_block.read().unwrap().height + 1;
3543                 let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret);
3544                 let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(
3545                         &route.paths[0], 50_000, &recipient_onion_fields, current_height, &None).unwrap();
3546                 let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
3547                 let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
3548
3549                 // Send a 0-msat update_add_htlc to fail the channel.
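                // (BOLT #2 requires update_add_htlc to offer an amount_msat greater than zero, so a
                // 0-msat HTLC is a hard protocol violation which warrants failing the channel.)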
3550                 let update_add_htlc = msgs::UpdateAddHTLC {
3551                         channel_id: chan.2,
3552                         htlc_id: 0,
3553                         amount_msat: 0,
3554                         payment_hash,
3555                         cltv_expiry,
3556                         onion_routing_packet,
3557                         skimmed_fee_msat: None,
3558                         blinding_point: None,
3559                 };
3560                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
3561         }
3562         let events = nodes[0].node.get_and_clear_pending_events();
3563         assert_eq!(events.len(), 3);
3564         // Check that Alice fails backward the pending HTLC from the second payment.
3565         match events[0] {
3566                 Event::PaymentPathFailed { payment_hash, .. } => {
3567                         assert_eq!(payment_hash, failed_payment_hash);
3568                 },
3569                 _ => panic!("Unexpected event"),
3570         }
3571         match events[1] {
3572                 Event::PaymentFailed { payment_hash, .. } => {
3573                         assert_eq!(payment_hash, failed_payment_hash);
3574                 },
3575                 _ => panic!("Unexpected event"),
3576         }
3577         match events[2] {
3578                 Event::ChannelClosed { reason: ClosureReason::ProcessingError { ref err }, .. } => {
3579                         assert_eq!(err, "Remote side tried to send a 0-msat HTLC");
3580                 },
3581                 _ => panic!("Unexpected event {:?}", events[2]),
3582         }
3583         check_closed_broadcast!(nodes[0], true);
3584         check_added_monitors!(nodes[0], 1);
3585 }
3586
3587 #[test]
3588 fn test_htlc_ignore_latest_remote_commitment() {
3589         // Test that HTLC transactions spending the latest remote commitment transaction are simply
3590         // ignored if we cannot claim them. This originally tickled an invalid unwrap().
3591         let chanmon_cfgs = create_chanmon_cfgs(2);
3592         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3593         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3594         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3595         if *nodes[1].connect_style.borrow() == ConnectStyle::FullBlockViaListen {
3596                 // We rely on the ability to connect a block redundantly, which isn't allowed via
3597                 // `chain::Listen`, so we never run the test if we randomly get assigned that
3598                 // connect_style.
3599                 return;
3600         }
3601         let funding_tx = create_announced_chan_between_nodes(&nodes, 0, 1).3;
3602         let error_message = "Channel force-closed";
3603         route_payment(&nodes[0], &[&nodes[1]], 10000000);
3604         nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
3605         connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
3606         check_closed_broadcast!(nodes[0], true);
3607         check_added_monitors!(nodes[0], 1);
3608         check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
3609
3610         let node_txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
3611         assert_eq!(node_txn.len(), 2);
3612         check_spends!(node_txn[0], funding_tx);
3613         check_spends!(node_txn[1], node_txn[0]);
3614
3615         let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone()]);
3616         connect_block(&nodes[1], &block);
3617         check_closed_broadcast!(nodes[1], true);
3618         check_added_monitors!(nodes[1], 1);
3619         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
3620
3621         // Duplicate the connect_block call since this may happen due to other listeners
3622         // registering new transactions
3623         connect_block(&nodes[1], &block);
3624 }
3625
3626 #[test]
3627 fn test_force_close_fail_back() {
3628         // Check which HTLCs are failed-backwards on channel force-closure
3629         let chanmon_cfgs = create_chanmon_cfgs(3);
3630         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3631         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3632         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3633         create_announced_chan_between_nodes(&nodes, 0, 1);
3634         create_announced_chan_between_nodes(&nodes, 1, 2);
3635
3636         let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
3637
3638         let mut payment_event = {
3639                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
3640                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
3641                 check_added_monitors!(nodes[0], 1);
3642
3643                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3644                 assert_eq!(events.len(), 1);
3645                 SendEvent::from_event(events.remove(0))
3646         };
3647
3648         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3649         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
3650
3651         expect_pending_htlcs_forwardable!(nodes[1]);
3652
3653         let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
3654         assert_eq!(events_2.len(), 1);
3655         payment_event = SendEvent::from_event(events_2.remove(0));
3656         assert_eq!(payment_event.msgs.len(), 1);
3657
3658         check_added_monitors!(nodes[1], 1);
3659         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
3660         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
3661         check_added_monitors!(nodes[2], 1);
3662         let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3663
3664         // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
3665         // state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
3666         // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
3667         let error_message = "Channel force-closed";
3668         nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
3669         check_closed_broadcast!(nodes[2], true);
3670         check_added_monitors!(nodes[2], 1);
3671         check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
3672         let commitment_tx = {
3673                 let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
3674                 // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
3675                 // have a use for it unless nodes[2] learns the preimage somehow; otherwise the funds
3676                 // will simply go back to nodes[1] upon timeout.
3677                 assert_eq!(node_txn.len(), 1);
3678                 node_txn.remove(0)
3679         };
3680
3681         mine_transaction(&nodes[1], &commitment_tx);
3682
3683         // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
3684         check_closed_broadcast!(nodes[1], true);
3685         check_added_monitors!(nodes[1], 1);
3686         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
3687
3688         // Now check that if we add the preimage to the ChannelMonitor it broadcasts our HTLC-Success.
3689         {
3690                 get_monitor!(nodes[2], payment_event.commitment_msg.channel_id)
3691                         .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger);
3692         }
3693         mine_transaction(&nodes[2], &commitment_tx);
3694         let mut node_txn = nodes[2].tx_broadcaster.txn_broadcast();
3695         assert_eq!(node_txn.len(), if nodes[2].connect_style.borrow().updates_best_block_first() { 2 } else { 1 });
3696         let htlc_tx = node_txn.pop().unwrap();
3697         assert_eq!(htlc_tx.input.len(), 1);
3698         assert_eq!(htlc_tx.input[0].previous_output.txid, commitment_tx.txid());
3699         assert_eq!(htlc_tx.lock_time, LockTime::ZERO); // Must be an HTLC-Success
3700         assert_eq!(htlc_tx.input[0].witness.len(), 5); // Must be an HTLC-Success
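        // (An HTLC-Timeout spend would instead set lock_time to the HTLC's cltv_expiry; the zero
        // lock_time is what identifies this second-stage transaction as an HTLC-Success.)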
3701
3702         check_spends!(htlc_tx, commitment_tx);
3703 }
3704
3705 #[test]
3706 fn test_dup_events_on_peer_disconnect() {
3707         // Test that if we receive a duplicative update_fulfill_htlc message after a reconnect we do
3708         // not generate a duplicate PaymentSent event. This was not always the case,
3709         // as we used to generate the event immediately upon receipt of the payment preimage in the
3710         // update_fulfill_htlc message.
3711
3712         let chanmon_cfgs = create_chanmon_cfgs(2);
3713         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3714         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3715         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3716         create_announced_chan_between_nodes(&nodes, 0, 1);
3717
3718         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
3719
3720         nodes[1].node.claim_funds(payment_preimage);
3721         expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
3722         check_added_monitors!(nodes[1], 1);
3723         let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3724         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
3725         expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
3726
3727         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3728         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3729
3730         let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3731         reconnect_args.pending_htlc_claims.0 = 1;
3732         reconnect_nodes(reconnect_args);
3733         expect_payment_path_successful!(nodes[0]);
3734 }
3735
3736 #[test]
3737 fn test_peer_disconnected_before_funding_broadcasted() {
3738         // Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
3739         // before the funding transaction has been broadcasted and doesn't reconnect in time.
3740         let chanmon_cfgs = create_chanmon_cfgs(2);
3741         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3742         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3743         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3744
3745         // Open a channel between `nodes[0]` and `nodes[1]`, for which the funding transaction is never
3746         // broadcasted, even though it's created by `nodes[0]`.
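        // (Arguments here: 1M sats of channel value, 500M msat pushed to the counterparty, and a
        // user_channel_id of 42; the trailing `None`s leave the temporary channel id and the
        // channel config overrides at their defaults.)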
3747         let expected_temporary_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
3748         let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
3749         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
3750         let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
3751         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
3752
3753         let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
3754         assert_eq!(temporary_channel_id, expected_temporary_channel_id);
3755
3756         assert!(nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
3757
3758         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
3759         assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id);
3760
3761         // Even though the funding transaction is created by `nodes[0]`, the `FundingCreated` msg is
3762         // never sent to `nodes[1]`, and therefore the tx is never signed by either party, nor
3763         // broadcasted.
3764         {
3765                 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
3766         }
3767
3768         // The peers disconnect before the funding is broadcasted.
3769         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3770         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3771
3772         // The time for peers to reconnect expires.
3773         for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
3774                 nodes[0].node.timer_tick_occurred();
3775         }
3776
3777         // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` and a
3778         // `DiscardFunding` event when the peers are disconnected and do not reconnect before the
3779         // funding transaction is broadcasted.
3780         check_closed_event!(&nodes[0], 2, ClosureReason::DisconnectedPeer, true,
3781                 [nodes[1].node.get_our_node_id()], 1000000);
3782         check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false,
3783                 [nodes[0].node.get_our_node_id()], 1000000);
3784 }
3785
3786 #[test]
3787 fn test_simple_peer_disconnect() {
3788         // Test that we can reconnect when there are no lost messages
3789         let chanmon_cfgs = create_chanmon_cfgs(3);
3790         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3791         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3792         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3793         create_announced_chan_between_nodes(&nodes, 0, 1);
3794         create_announced_chan_between_nodes(&nodes, 1, 2);
3795
3796         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3797         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3798         let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3799         reconnect_args.send_channel_ready = (true, true);
3800         reconnect_nodes(reconnect_args);
3801
3802         let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3803         let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3804         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
3805         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);
3806
3807         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3808         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3809         reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3810
3811         let (payment_preimage_3, payment_hash_3, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
3812         let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3813         let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3814         let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3815
3816         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3817         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3818
3819         claim_payment_along_route(
3820                 ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage_3)
3821                         .skip_last(true)
3822         );
3823         fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);
3824
3825         let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3826         reconnect_args.pending_cell_htlc_fails.0 = 1;
3827         reconnect_args.pending_cell_htlc_claims.0 = 1;
3828         reconnect_nodes(reconnect_args);
3829         {
3830                 let events = nodes[0].node.get_and_clear_pending_events();
3831                 assert_eq!(events.len(), 4);
3832                 match events[0] {
3833                         Event::PaymentSent { payment_preimage, payment_hash, .. } => {
3834                                 assert_eq!(payment_preimage, payment_preimage_3);
3835                                 assert_eq!(payment_hash, payment_hash_3);
3836                         },
3837                         _ => panic!("Unexpected event"),
3838                 }
3839                 match events[1] {
3840                         Event::PaymentPathSuccessful { .. } => {},
3841                         _ => panic!("Unexpected event"),
3842                 }
3843                 match events[2] {
3844                         Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } => {
3845                                 assert_eq!(payment_hash, payment_hash_5);
3846                                 assert!(payment_failed_permanently);
3847                         },
3848                         _ => panic!("Unexpected event"),
3849                 }
3850                 match events[3] {
3851                         Event::PaymentFailed { payment_hash, .. } => {
3852                                 assert_eq!(payment_hash, payment_hash_5);
3853                         },
3854                         _ => panic!("Unexpected event"),
3855                 }
3856         }
3857         check_added_monitors(&nodes[0], 1);
3858
3859         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
3860         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
3861 }
3862
3863 fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken_lnd: bool) {
3864         // Test that we can reconnect when in-flight HTLC updates get dropped
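        // `messages_delivered` encodes how far the initial HTLC add got before the disconnect
        // (as exercised below): 0 => channel_ready itself was never delivered to nodes[1];
        // 1 => the channel is set up but the update_add_htlc is not delivered; 2 => the add is
        // delivered but not the commitment_signed; 3 => commitment_signed delivered, nodes[1]'s
        // RAA/commitment_signed not; 4 => nodes[1]'s RAA delivered, its commitment_signed not;
        // 5 => nodes[1]'s commitment_signed delivered, nodes[0]'s final RAA not; 6 => the full
        // commitment dance completed before the disconnect.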
3865         let chanmon_cfgs = create_chanmon_cfgs(2);
3866         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3867         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3868         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3869
3870         let mut as_channel_ready = None;
3871         let channel_id = if messages_delivered == 0 {
3872                 let (channel_ready, chan_id, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001);
3873                 as_channel_ready = Some(channel_ready);
3874                 // nodes[1] doesn't receive the channel_ready message (it'll be re-sent on reconnect)
3875                 // Note that we store it so that if we're running with `simulate_broken_lnd` we can deliver
3876                 // it before the channel_reestablish message.
3877                 chan_id
3878         } else {
3879                 create_announced_chan_between_nodes(&nodes, 0, 1).2
3880         };
3881
3882         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000);
3883
3884         let payment_event = {
3885                 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
3886                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
3887                 check_added_monitors!(nodes[0], 1);
3888
3889                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3890                 assert_eq!(events.len(), 1);
3891                 SendEvent::from_event(events.remove(0))
3892         };
3893         assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);
3894
3895         if messages_delivered < 2 {
3896                 // Drop the payment_event messages, and let them get re-generated in reconnect_nodes!
3897         } else {
3898                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3899                 if messages_delivered >= 3 {
3900                         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
3901                         check_added_monitors!(nodes[1], 1);
3902                         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3903
3904                         if messages_delivered >= 4 {
3905                                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
3906                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3907                                 check_added_monitors!(nodes[0], 1);
3908
3909                                 if messages_delivered >= 5 {
3910                                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
3911                                         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
3912                                         // No commitment_signed so get_event_msg's assert(len == 1) passes
3913                                         check_added_monitors!(nodes[0], 1);
3914
3915                                         if messages_delivered >= 6 {
3916                                                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
3917                                                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3918                                                 check_added_monitors!(nodes[1], 1);
3919                                         }
3920                                 }
3921                         }
3922                 }
3923         }
3924
3925         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3926         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3927         if messages_delivered < 3 {
3928                 if simulate_broken_lnd {
3929                         // lnd has a long-standing bug where they send a channel_ready prior to a
3930                         // channel_reestablish if you reconnect before channel_ready has been exchanged.
3931                         //
3932                         // Here we simulate that behavior, delivering a channel_ready immediately on
3933                         // reconnect. Note that we don't bother skipping the now-duplicate channel_ready sent
3934                         // in `reconnect_nodes` but we currently don't fail based on that.
3935                         //
3936                         // See-also <https://github.com/lightningnetwork/lnd/issues/4006>
3937                         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready.as_ref().unwrap().0);
3938                 }
3939                 // Even if the channel_ready messages get exchanged, as long as nothing further was
3940                 // received on either side, both sides will need to resend them.
3941                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3942                 reconnect_args.send_channel_ready = (true, true);
3943                 reconnect_args.pending_htlc_adds.1 = 1;
3944                 reconnect_nodes(reconnect_args);
3945         } else if messages_delivered == 3 {
3946                 // nodes[0] still wants its RAA + commitment_signed
3947                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3948                 reconnect_args.pending_responding_commitment_signed.0 = true;
3949                 reconnect_args.pending_raa.0 = true;
3950                 reconnect_nodes(reconnect_args);
3951         } else if messages_delivered == 4 {
3952                 // nodes[0] still wants its commitment_signed
3953                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3954                 reconnect_args.pending_responding_commitment_signed.0 = true;
3955                 reconnect_nodes(reconnect_args);
3956         } else if messages_delivered == 5 {
3957                 // nodes[1] still wants its final RAA
3958                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3959                 reconnect_args.pending_raa.1 = true;
3960                 reconnect_nodes(reconnect_args);
3961         } else if messages_delivered == 6 {
3962                 // Everything was delivered...
3963                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3964         }
3965
3966         let events_1 = nodes[1].node.get_and_clear_pending_events();
3967         if messages_delivered == 0 {
3968                 assert_eq!(events_1.len(), 2);
3969                 match events_1[0] {
3970                         Event::ChannelReady { .. } => { },
3971                         _ => panic!("Unexpected event"),
3972                 };
3973                 match events_1[1] {
3974                         Event::PendingHTLCsForwardable { .. } => { },
3975                         _ => panic!("Unexpected event"),
3976                 };
3977         } else {
3978                 assert_eq!(events_1.len(), 1);
3979                 match events_1[0] {
3980                         Event::PendingHTLCsForwardable { .. } => { },
3981                         _ => panic!("Unexpected event"),
3982                 };
3983         }
3984
3985         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3986         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3987         reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3988
3989         nodes[1].node.process_pending_htlc_forwards();
3990
3991         let events_2 = nodes[1].node.get_and_clear_pending_events();
3992         assert_eq!(events_2.len(), 1);
3993         match events_2[0] {
3994                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
3995                         assert_eq!(payment_hash_1, *payment_hash);
3996                         assert_eq!(amount_msat, 1_000_000);
3997                         assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
3998                         assert_eq!(via_channel_id, Some(channel_id));
3999                         match &purpose {
4000                                 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
4001                                         assert!(payment_preimage.is_none());
4002                                         assert_eq!(payment_secret_1, *payment_secret);
4003                                 },
4004                                 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
4005                         }
4006                 },
4007                 _ => panic!("Unexpected event"),
4008         }
4009
4010         nodes[1].node.claim_funds(payment_preimage_1);
4011         check_added_monitors!(nodes[1], 1);
4012         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
4013
4014         let events_3 = nodes[1].node.get_and_clear_pending_msg_events();
4015         assert_eq!(events_3.len(), 1);
4016         let (update_fulfill_htlc, commitment_signed) = match events_3[0] {
4017                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
4018                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
4019                         assert!(updates.update_add_htlcs.is_empty());
4020                         assert!(updates.update_fail_htlcs.is_empty());
4021                         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
4022                         assert!(updates.update_fail_malformed_htlcs.is_empty());
4023                         assert!(updates.update_fee.is_none());
4024                         (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
4025                 },
4026                 _ => panic!("Unexpected event"),
4027         };
4028
4029         if messages_delivered >= 1 {
4030                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc);
4031
4032                 let events_4 = nodes[0].node.get_and_clear_pending_events();
4033                 assert_eq!(events_4.len(), 1);
4034                 match events_4[0] {
4035                         Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
4036                                 assert_eq!(payment_preimage_1, *payment_preimage);
4037                                 assert_eq!(payment_hash_1, *payment_hash);
4038                         },
4039                         _ => panic!("Unexpected event"),
4040                 }
4041
4042                 if messages_delivered >= 2 {
4043                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
4044                         check_added_monitors!(nodes[0], 1);
4045                         let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4046
4047                         if messages_delivered >= 3 {
4048                                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
4049                                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4050                                 check_added_monitors!(nodes[1], 1);
4051
4052                                 if messages_delivered >= 4 {
4053                                         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed);
4054                                         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4055                                         // No commitment_signed so get_event_msg's assert(len == 1) passes
4056                                         check_added_monitors!(nodes[1], 1);
4057
4058                                         if messages_delivered >= 5 {
4059                                                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
4060                                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4061                                                 check_added_monitors!(nodes[0], 1);
4062                                         }
4063                                 }
4064                         }
4065                 }
4066         }
4067
4068         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4069         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4070         if messages_delivered < 2 {
4071                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4072                 reconnect_args.pending_htlc_claims.0 = 1;
4073                 reconnect_nodes(reconnect_args);
4074                 if messages_delivered < 1 {
4075                         expect_payment_sent!(nodes[0], payment_preimage_1);
4076                 } else {
4077                         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4078                 }
4079         } else if messages_delivered == 2 {
4080                 // nodes[0] still wants its RAA + commitment_signed
4081                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4082                 reconnect_args.pending_responding_commitment_signed.1 = true;
4083                 reconnect_args.pending_raa.1 = true;
4084                 reconnect_nodes(reconnect_args);
4085         } else if messages_delivered == 3 {
4086                 // nodes[0] still wants its commitment_signed
4087                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4088                 reconnect_args.pending_responding_commitment_signed.1 = true;
4089                 reconnect_nodes(reconnect_args);
4090         } else if messages_delivered == 4 {
4091                 // nodes[1] still wants its final RAA
4092                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4093                 reconnect_args.pending_raa.0 = true;
4094                 reconnect_nodes(reconnect_args);
4095         } else if messages_delivered == 5 {
4096                 // Everything was delivered...
4097                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
4098         }
4099
4100         if messages_delivered == 1 || messages_delivered == 2 {
4101                 expect_payment_path_successful!(nodes[0]);
4102         }
4103         if messages_delivered <= 5 {
4104                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4105                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4106         }
4107         reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
4108
4109         if messages_delivered > 2 {
4110                 expect_payment_path_successful!(nodes[0]);
4111         }
4112
4113         // Channel should still work fine...
4114         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
4115         let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
4116         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
4117 }
4118
4119 #[test]
4120 fn test_drop_messages_peer_disconnect_a() {
4121         do_test_drop_messages_peer_disconnect(0, true);
4122         do_test_drop_messages_peer_disconnect(0, false);
4123         do_test_drop_messages_peer_disconnect(1, false);
4124         do_test_drop_messages_peer_disconnect(2, false);
4125 }
4126
4127 #[test]
4128 fn test_drop_messages_peer_disconnect_b() {
4129         do_test_drop_messages_peer_disconnect(3, false);
4130         do_test_drop_messages_peer_disconnect(4, false);
4131         do_test_drop_messages_peer_disconnect(5, false);
4132         do_test_drop_messages_peer_disconnect(6, false);
4133 }
4134
4135 #[test]
4136 fn test_channel_ready_without_best_block_updated() {
4137         // Previously, if we were offline when a funding transaction was locked in, and then we came
4138         // back online, calling best_block_updated once followed by transactions_confirmed, we'd not
4139         // generate a channel_ready until a later best_block_updated. This tests that we generate the
4140         // channel_ready immediately instead.
4141         let chanmon_cfgs = create_chanmon_cfgs(2);
4142         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4143         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4144         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4145         *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
4146
4147         let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
4148
4149         let conf_height = nodes[0].best_block_info().1 + 1;
4150         connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
4151         let block_txn = [funding_tx];
4152         let conf_txn: Vec<_> = block_txn.iter().enumerate().collect();
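        // `transactions_confirmed` takes `TransactionData`, i.e. (index-within-block, &Transaction)
        // pairs, hence the enumerate() above.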
4153         let conf_block_header = nodes[0].get_block_header(conf_height);
4154         nodes[0].node.transactions_confirmed(&conf_block_header, &conf_txn[..], conf_height);
4155
4156         // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
4157         let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
4158         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
4159 }
4160
4161 #[test]
4162 fn test_channel_monitor_skipping_block_when_channel_manager_is_leading() {
4163         let chanmon_cfgs = create_chanmon_cfgs(2);
4164         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4165         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4166         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4167
4168         // Let channel_manager get ahead of chain_monitor by 1 block.
4169         // This emulates a race condition where a newly added channel_monitor skips processing one block
4170         // when the client calls block_connected on the channel_manager first and then on the chain_monitor.
4171         let height_1 = nodes[0].best_block_info().1 + 1;
4172         let mut block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
4173
4174         nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
4175         nodes[0].node.block_connected(&block_1, height_1);
4176
4177         // Create channel, and it gets added to chain_monitor in funding_created.
4178         let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
4179
4180         // Now, newly added channel_monitor in chain_monitor hasn't processed block_1,
4181         // but its best_block is block_1, since that was populated by channel_manager, and channel_manager
4182         // was running ahead of chain_monitor at the time of funding_created.
4183         // Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
4184         // Hence, this channel's channel_monitor skipped block_1 and directly tries to process subsequent blocks.
4185         confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
4186         connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
4187
4188         // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
4189         let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
4190         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
4191 }
4192
4193 #[test]
4194 fn test_channel_monitor_skipping_block_when_channel_manager_is_lagging() {
4195         let chanmon_cfgs = create_chanmon_cfgs(2);
4196         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4197         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4198         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4199
4200         // Let chain_monitor get ahead of channel_manager by 1 block.
4201         // This emulates a race condition where a newly added channel_monitor skips processing one block
4202         // when the client calls block_connected on the chain_monitor first and then on the channel_manager.
4203         let height_1 = nodes[0].best_block_info().1 + 1;
4204         let mut block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
4205
4206         nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
4207         nodes[0].chain_monitor.chain_monitor.block_connected(&block_1, height_1);
4208
4209         // Create channel, and it gets added to chain_monitor in funding_created.
4210         let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
4211
4212         // channel_manager can't really skip block_1; it should get it eventually.
4213         nodes[0].node.block_connected(&block_1, height_1);
4214
4215         // Now, the newly added channel_monitor in chain_monitor hasn't processed block_1; its best_block is
4216         // the block before block_1, since that was populated by channel_manager, and channel_manager was
4217         // running behind at the time of funding_created.
4218         // Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
4219         // Hence, this channel's channel_monitor skipped block_1 and directly tries to process subsequent blocks.
4220         confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
4221         connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
4222
4223         // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
4224         let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
4225         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
4226 }
4227
4228 #[test]
4229 fn test_drop_messages_peer_disconnect_dual_htlc() {
4230         // Test that we can handle reconnecting when both sides of a channel have pending
4231         // commitment_updates when we disconnect.
4232         let chanmon_cfgs = create_chanmon_cfgs(2);
4233         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4234         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4235         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4236         create_announced_chan_between_nodes(&nodes, 0, 1);
4237
4238         let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
4239
4240         // Now try to send a second payment, whose update messages won't be delivered before the disconnect
4241         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
4242         nodes[0].node.send_payment_with_route(&route, payment_hash_2,
4243                 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
4244         check_added_monitors!(nodes[0], 1);
4245
4246         let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
4247         assert_eq!(events_1.len(), 1);
4248         match events_1[0] {
4249                 MessageSendEvent::UpdateHTLCs { .. } => {},
4250                 _ => panic!("Unexpected event"),
4251         }
4252
4253         nodes[1].node.claim_funds(payment_preimage_1);
4254         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
4255         check_added_monitors!(nodes[1], 1);
4256
4257         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
4258         assert_eq!(events_2.len(), 1);
4259         match events_2[0] {
4260                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
4261                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
4262                         assert!(update_add_htlcs.is_empty());
4263                         assert_eq!(update_fulfill_htlcs.len(), 1);
4264                         assert!(update_fail_htlcs.is_empty());
4265                         assert!(update_fail_malformed_htlcs.is_empty());
4266                         assert!(update_fee.is_none());
4267
4268                         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
4269                         let events_3 = nodes[0].node.get_and_clear_pending_events();
4270                         assert_eq!(events_3.len(), 1);
4271                         match events_3[0] {
4272                                 Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
4273                                         assert_eq!(*payment_preimage, payment_preimage_1);
4274                                         assert_eq!(*payment_hash, payment_hash_1);
4275                                 },
4276                                 _ => panic!("Unexpected event"),
4277                         }
4278
4279                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
4280                         let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4281                         // No commitment_signed so get_event_msg's assert(len == 1) passes
4282                         check_added_monitors!(nodes[0], 1);
4283                 },
4284                 _ => panic!("Unexpected event"),
4285         }
4286
4287         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4288         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4289
4290         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
4291                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
4292         }, true).unwrap();
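        // (The trailing bool passed to peer_connected indicates whether the connection is inbound,
        // so the two peers report opposite values for the same connection.)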
4293         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
4294         assert_eq!(reestablish_1.len(), 1);
4295         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
4296                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
4297         }, false).unwrap();
4298         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
4299         assert_eq!(reestablish_2.len(), 1);
4300
4301         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
4302         let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
4303         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
4304         let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
4305
4306         assert!(as_resp.0.is_none());
4307         assert!(bs_resp.0.is_none());
4308
4309         assert!(bs_resp.1.is_none());
4310         assert!(bs_resp.2.is_none());
4311
4312         assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
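        // i.e. on reestablish, nodes[0] must retransmit its commitment update before its RAA.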
4313
4314         assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1);
4315         assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty());
4316         assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
4317         assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
4318         assert!(as_resp.2.as_ref().unwrap().update_fee.is_none());
4319         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]);
4320         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed);
4321         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4322         // No commitment_signed so get_event_msg's assert(len == 1) passes
4323         check_added_monitors!(nodes[1], 1);
4324
4325         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap());
4326         let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4327         assert!(bs_second_commitment_signed.update_add_htlcs.is_empty());
4328         assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty());
4329         assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty());
4330         assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty());
4331         assert!(bs_second_commitment_signed.update_fee.is_none());
4332         check_added_monitors!(nodes[1], 1);
4333
4334         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
4335         let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4336         assert!(as_commitment_signed.update_add_htlcs.is_empty());
4337         assert!(as_commitment_signed.update_fulfill_htlcs.is_empty());
4338         assert!(as_commitment_signed.update_fail_htlcs.is_empty());
4339         assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty());
4340         assert!(as_commitment_signed.update_fee.is_none());
4341         check_added_monitors!(nodes[0], 1);
4342
4343         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed);
4344         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4345         // No commitment_signed so get_event_msg's assert(len == 1) passes
4346         check_added_monitors!(nodes[0], 1);
4347
4348         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed);
4349         let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4350         // No commitment_signed so get_event_msg's assert(len == 1) passes
4351         check_added_monitors!(nodes[1], 1);
4352
4353         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
4354         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4355         check_added_monitors!(nodes[1], 1);
4356
4357         expect_pending_htlcs_forwardable!(nodes[1]);
4358
4359         let events_5 = nodes[1].node.get_and_clear_pending_events();
4360         assert_eq!(events_5.len(), 1);
4361         match events_5[0] {
4362                 Event::PaymentClaimable { ref payment_hash, ref purpose, .. } => {
4363                         assert_eq!(payment_hash_2, *payment_hash);
4364                         match &purpose {
4365                                 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
4366                                         assert!(payment_preimage.is_none());
4367                                         assert_eq!(payment_secret_2, *payment_secret);
4368                                 },
4369                                 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
4370                         }
4371                 },
4372                 _ => panic!("Unexpected event"),
4373         }
4374
4375         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
4376         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4377         check_added_monitors!(nodes[0], 1);
4378
4379         expect_payment_path_successful!(nodes[0]);
4380         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
4381 }
4382
4383 fn do_test_htlc_timeout(send_partial_mpp: bool) {
4384         // If the user fails to claim/fail an HTLC within the HTLC CLTV timeout we fail it for them
4385         // to avoid our counterparty failing the channel.
4386         let chanmon_cfgs = create_chanmon_cfgs(2);
4387         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4388         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4389         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4390
4391         create_announced_chan_between_nodes(&nodes, 0, 1);
4392
4393         let our_payment_hash = if send_partial_mpp {
4394                 let (route, our_payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000);
4395                 // Use the utility function send_payment_along_path to send the payment with MPP data which
4396                 // indicates there are more HTLCs coming.
4397                 let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
4398                 let payment_id = PaymentId([42; 32]);
4399                 let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
4400                         RecipientOnionFields::secret_only(payment_secret), payment_id, &route).unwrap();
4401                 nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash,
4402                         RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id,
4403                         &None, session_privs[0]).unwrap();
4404                 check_added_monitors!(nodes[0], 1);
4405                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
4406                 assert_eq!(events.len(), 1);
4407                 // Now do the relevant commitment_signed/RAA dances along the path, noting that the final
4408                 // hop should *not* yet generate any PaymentClaimable event(s).
4409                 pass_along_path(&nodes[0], &[&nodes[1]], 100000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
4410                 our_payment_hash
4411         } else {
4412                 route_payment(&nodes[0], &[&nodes[1]], 100000).1
4413         };
4414
4415         let mut block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
4416         connect_block(&nodes[0], &block);
4417         connect_block(&nodes[1], &block);
4418         let block_count = TEST_FINAL_CLTV + CHAN_CONFIRM_DEPTH + 2 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS;
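        // i.e. connect blocks until the HTLC's cltv_expiry falls within CLTV_CLAIM_BUFFER +
        // LATENCY_GRACE_PERIOD_BLOCKS of the tip, the point at which nodes[1] fails the HTLC
        // backward rather than risk having to claim it on-chain.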
4419         for _ in CHAN_CONFIRM_DEPTH + 2..block_count {
4420                 block.header.prev_blockhash = block.block_hash();
4421                 connect_block(&nodes[0], &block);
4422                 connect_block(&nodes[1], &block);
4423         }
4424
4425         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
4426
4427         check_added_monitors!(nodes[1], 1);
4428         let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4429         assert!(htlc_timeout_updates.update_add_htlcs.is_empty());
4430         assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1);
4431         assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty());
4432         assert!(htlc_timeout_updates.update_fee.is_none());
4433
4434         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]);
4435         commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false);
4436         // 100_000 msat as u64, followed by the height at which we failed back above
4437         let mut expected_failure_data = (100_000 as u64).to_be_bytes().to_vec();
4438         expected_failure_data.extend_from_slice(&(block_count - 1).to_be_bytes());
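             // 0x4000 | 15 is PERM|incorrect_or_unknown_payment_details; per BOLT 4 its failure
             // data is the HTLC amount (a u64) followed by the height at which we failed (a u32).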
4439         expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]);
4440 }
4441
4442 #[test]
4443 fn test_htlc_timeout() {
4444         do_test_htlc_timeout(true);
4445         do_test_htlc_timeout(false);
4446 }
4447
4448 fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
4449         // Tests that HTLCs in the holding cell are timed out after the requisite number of blocks.
4450         let chanmon_cfgs = create_chanmon_cfgs(3);
4451         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
4452         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
4453         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
4454         create_announced_chan_between_nodes(&nodes, 0, 1);
4455         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4456
4457         // Make sure all nodes are at the same starting height
4458         connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
4459         connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
4460         connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
4461
4462         // Route a first payment to get the 1 -> 2 channel in awaiting_raa...
4463         let (route, first_payment_hash, _, first_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
4464         nodes[1].node.send_payment_with_route(&route, first_payment_hash,
4465                 RecipientOnionFields::secret_only(first_payment_secret), PaymentId(first_payment_hash.0)).unwrap();
4466         assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
4467         check_added_monitors!(nodes[1], 1);
4468
4469         // Now attempt to route a second payment, which should be placed in the holding cell
4470         let sending_node = if forwarded_htlc { &nodes[0] } else { &nodes[1] };
4471         let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(sending_node, nodes[2], 100000);
4472         sending_node.node.send_payment_with_route(&route, second_payment_hash,
4473                 RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap();
4474         if forwarded_htlc {
4475                 check_added_monitors!(nodes[0], 1);
4476                 let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
4477                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
4478                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
4479                 expect_pending_htlcs_forwardable!(nodes[1]);
4480         }
4481         check_added_monitors!(nodes[1], 0);
4482
4483         connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS);
4484         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4485         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
4486         connect_blocks(&nodes[1], 1);
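             // That one extra block pushes the holding-cell HTLC past its fail-back deadline, so
             // nodes[1] now fails it - backwards to nodes[0] if it was forwarded, locally otherwise.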
4487
4488         if forwarded_htlc {
4489                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
4490                 check_added_monitors!(nodes[1], 1);
4491                 let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
4492                 assert_eq!(fail_commit.len(), 1);
4493                 match fail_commit[0] {
4494                         MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, ref commitment_signed, .. }, .. } => {
4495                                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
4496                                 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, true, true);
4497                         },
4498                         _ => unreachable!(),
4499                 }
4500                 expect_payment_failed_with_update!(nodes[0], second_payment_hash, false, chan_2.0.contents.short_channel_id, false);
4501         } else {
4502                 expect_payment_failed!(nodes[1], second_payment_hash, false);
4503         }
4504 }
4505
4506 #[test]
4507 fn test_holding_cell_htlc_add_timeouts() {
4508         do_test_holding_cell_htlc_add_timeouts(false);
4509         do_test_holding_cell_htlc_add_timeouts(true);
4510 }
4511
4512 macro_rules! check_spendable_outputs {
4513         ($node: expr, $keysinterface: expr) => {
4514                 {
4515                         let mut events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events();
4516                         let mut txn = Vec::new();
4517                         let mut all_outputs = Vec::new();
4518                         let secp_ctx = Secp256k1::new();
4519                         for event in events.drain(..) {
4520                                 match event {
4521                                         Event::SpendableOutputs { mut outputs, channel_id: _ } => {
4522                                                 for outp in outputs.drain(..) {
4523                                                         txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx).unwrap());
4524                                                         all_outputs.push(outp);
4525                                                 }
4526                                         },
4527                                         _ => panic!("Unexpected event"),
4528                                 };
4529                         }
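                             // If there was more than one output, also try sweeping them all in a
                             // single aggregated transaction (in addition to the individual spends
                             // above).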
4530                         if all_outputs.len() > 1 {
4531                                 if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs(&all_outputs.iter().collect::<Vec<_>>(), Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx) {
4532                                         txn.push(tx);
4533                                 }
4534                         }
4535                         txn
4536                 }
4537         }
4538 }
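     // A typical use, as in the tests below, sweeps a node's pending SpendableOutputs events into
     // transactions which we can then sanity-check, e.g.:
     //   let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);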
4539
4540 #[test]
4541 fn test_claim_sizeable_push_msat() {
4542         // Incidentally test SpendableOutputs event generation due to the detection of a to_local output on the commitment tx
4543         let chanmon_cfgs = create_chanmon_cfgs(2);
4544         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4545         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4546         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4547
4548         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
4549         let error_message = "Channel force-closed";
4550         nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
4551         check_closed_broadcast!(nodes[1], true);
4552         check_added_monitors!(nodes[1], 1);
4553         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[0].node.get_our_node_id()], 100000);
4554         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4555         assert_eq!(node_txn.len(), 1);
4556         check_spends!(node_txn[0], chan.3);
4557         assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
4558
4559         mine_transaction(&nodes[1], &node_txn[0]);
4560         connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
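             // nodes[1]'s to_local output is encumbered by a to_self_delay OP_CSV (BREAKDOWN_TIMEOUT
             // in the test config), so the monitor only surfaces it as spendable once the commitment
             // tx has that many confirmations, and the sweep must set nSequence to match (checked
             // below).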
4561
4562         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4563         assert_eq!(spend_txn.len(), 1);
4564         assert_eq!(spend_txn[0].input.len(), 1);
4565         check_spends!(spend_txn[0], node_txn[0]);
4566         assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
4567 }
4568
4569 #[test]
4570 fn test_claim_on_remote_sizeable_push_msat() {
4571         // Same test as the previous one, but on the remote commitment tx, as per_commitment_point registration changes depending on whether you're the funder or fundee, and the
4572         // to_remote output is encumbered by a P2WPKH
4573         let chanmon_cfgs = create_chanmon_cfgs(2);
4574         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4575         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4576         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4577         let error_message = "Channel force-closed";
4578
4579         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
4580         nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
4581         check_closed_broadcast!(nodes[0], true);
4582         check_added_monitors!(nodes[0], 1);
4583         check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
4584
4585         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4586         assert_eq!(node_txn.len(), 1);
4587         check_spends!(node_txn[0], chan.3);
4588         assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
4589
4590         mine_transaction(&nodes[1], &node_txn[0]);
4591         check_closed_broadcast!(nodes[1], true);
4592         check_added_monitors!(nodes[1], 1);
4593         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4594         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
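             // Outputs on a counterparty's broadcast commitment are only surfaced as
             // SpendableOutputs once they're ANTI_REORG_DELAY confirmations deep, so that a reorg
             // can't invalidate the sweep.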
4595
4596         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4597         assert_eq!(spend_txn.len(), 1);
4598         check_spends!(spend_txn[0], node_txn[0]);
4599 }
4600
4601 #[test]
4602 fn test_claim_on_remote_revoked_sizeable_push_msat() {
4603         // Same test as the previous one, but on the remote revoked commitment tx, as per_commitment_point registration changes depending on whether you're the funder or fundee, and the
4604         // to_remote output is encumbered by a P2WPKH
4605
4606         let chanmon_cfgs = create_chanmon_cfgs(2);
4607         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4608         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4609         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4610
4611         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000);
4612         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4613         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2);
4614         assert_eq!(revoked_local_txn[0].input.len(), 1);
4615         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
4616
4617         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4618         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4619         check_closed_broadcast!(nodes[1], true);
4620         check_added_monitors!(nodes[1], 1);
4621         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4622
4623         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4624         mine_transaction(&nodes[1], &node_txn[0]);
4625         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4626
4627         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4628         assert_eq!(spend_txn.len(), 3);
4629         check_spends!(spend_txn[0], revoked_local_txn[0]); // to_remote output on revoked remote commitment_tx
4630         check_spends!(spend_txn[1], node_txn[0]);
4631         check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[0]); // Both outputs
4632 }
4633
4634 #[test]
4635 fn test_static_spendable_outputs_preimage_tx() {
4636         let chanmon_cfgs = create_chanmon_cfgs(2);
4637         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4638         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4639         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4640
4641         // Create some initial channels
4642         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4643
4644         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
4645
4646         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4647         assert_eq!(commitment_tx[0].input.len(), 1);
4648         assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
4649
4650         // Settle A's commitment tx on B's chain
4651         nodes[1].node.claim_funds(payment_preimage);
4652         expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
4653         check_added_monitors!(nodes[1], 1);
4654         mine_transaction(&nodes[1], &commitment_tx[0]);
4655         check_added_monitors!(nodes[1], 1);
4656         let events = nodes[1].node.get_and_clear_pending_msg_events();
4657         match events[0] {
4658                 MessageSendEvent::UpdateHTLCs { .. } => {},
4659                 _ => panic!("Unexpected event"),
4660         }
4661         match events[2] {
4662                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4663                 _ => panic!("Unexpected event"),
4664         }
4665
4666         // Check B's monitor was able to send back output descriptor event for preimage tx on A's commitment tx
4667         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: preimage tx
4668         assert_eq!(node_txn.len(), 1);
4669         check_spends!(node_txn[0], commitment_tx[0]);
4670         assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
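             // The final witness element of a preimage claim is the offered-HTLC redeemscript, so
             // its length matching OFFERED_HTLC_SCRIPT_WEIGHT identifies this as a spend of the
             // offered-HTLC path.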
4671
4672         mine_transaction(&nodes[1], &node_txn[0]);
4673         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4674         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4675
4676         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4677         assert_eq!(spend_txn.len(), 1);
4678         check_spends!(spend_txn[0], node_txn[0]);
4679 }
4680
4681 #[test]
4682 fn test_static_spendable_outputs_timeout_tx() {
4683         let chanmon_cfgs = create_chanmon_cfgs(2);
4684         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4685         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4686         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4687
4688         // Create some initial channels
4689         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4690
4691         // Rebalance the network a bit by relaying one payment through all the channels ...
4692         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
4693
4694         let (_, our_payment_hash, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000);
4695
4696         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4697         assert_eq!(commitment_tx[0].input.len(), 1);
4698         assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
4699
4700         // Settle A's commitment tx on B's chain
4701         mine_transaction(&nodes[1], &commitment_tx[0]);
4702         check_added_monitors!(nodes[1], 1);
4703         let events = nodes[1].node.get_and_clear_pending_msg_events();
4704         match events[1] {
4705                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4706                 _ => panic!("Unexpected event"),
4707         }
4708         connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
4709
4710         // Check B's monitor was able to send back output descriptor event for timeout tx on A's commitment tx
4711         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4712         assert_eq!(node_txn.len(), 1); // ChannelMonitor: timeout tx
4713         check_spends!(node_txn[0], commitment_tx[0].clone());
4714         assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4715
4716         mine_transaction(&nodes[1], &node_txn[0]);
4717         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4718         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4719         expect_payment_failed!(nodes[1], our_payment_hash, false);
4720
4721         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4722         assert_eq!(spend_txn.len(), 3); // SpendableOutputs: remote_commitment_tx.to_remote, timeout_tx.output, and one tx aggregating both
4723         check_spends!(spend_txn[0], commitment_tx[0]);
4724         check_spends!(spend_txn[1], node_txn[0]);
4725         check_spends!(spend_txn[2], node_txn[0], commitment_tx[0]); // All outputs
4726 }
4727
4728 #[test]
4729 fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
4730         let chanmon_cfgs = create_chanmon_cfgs(2);
4731         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4732         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4733         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4734
4735         // Create some initial channels
4736         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4737
4738         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4739         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
4740         assert_eq!(revoked_local_txn[0].input.len(), 1);
4741         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4742
4743         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4744
4745         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4746         check_closed_broadcast!(nodes[1], true);
4747         check_added_monitors!(nodes[1], 1);
4748         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4749
4750         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4751         assert_eq!(node_txn.len(), 1);
4752         assert_eq!(node_txn[0].input.len(), 2);
4753         check_spends!(node_txn[0], revoked_local_txn[0]);
4754
4755         mine_transaction(&nodes[1], &node_txn[0]);
4756         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4757
4758         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4759         assert_eq!(spend_txn.len(), 1);
4760         check_spends!(spend_txn[0], node_txn[0]);
4761 }
4762
4763 #[test]
4764 fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
4765         let mut chanmon_cfgs = create_chanmon_cfgs(2);
4766         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
4767         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4768         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4769         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4770
4771         // Create some initial channels
4772         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4773
4774         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4775         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
4776         assert_eq!(revoked_local_txn[0].input.len(), 1);
4777         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4778
4779         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4780
4781         // A will generate HTLC-Timeout from revoked commitment tx
4782         mine_transaction(&nodes[0], &revoked_local_txn[0]);
4783         check_closed_broadcast!(nodes[0], true);
4784         check_added_monitors!(nodes[0], 1);
4785         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4786         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
4787
4788         let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4789         assert_eq!(revoked_htlc_txn.len(), 1);
4790         assert_eq!(revoked_htlc_txn[0].input.len(), 1);
4791         assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
4792         check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
4793         assert_ne!(revoked_htlc_txn[0].lock_time, LockTime::ZERO); // HTLC-Timeout
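             // HTLC-Timeout transactions set nLockTime to the HTLC's cltv_expiry (they can't be
             // mined until the HTLC has expired), while HTLC-Success transactions leave it at zero -
             // hence the lock time check above to confirm which we got.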
4794
4795         // B will generate justice tx from A's revoked commitment/HTLC tx
4796         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
4797         check_closed_broadcast!(nodes[1], true);
4798         check_added_monitors!(nodes[1], 1);
4799         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4800
4801         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4802         assert_eq!(node_txn.len(), 2); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs
4803         // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
4804         // including the one already spent by revoked_htlc_txn[0]. That's OK, we'll spend with valid
4805         // transactions next...
4806         assert_eq!(node_txn[0].input.len(), 3);
4807         check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);
4808
4809         assert_eq!(node_txn[1].input.len(), 2);
4810         check_spends!(node_txn[1], revoked_local_txn[0], revoked_htlc_txn[0]);
4811         if node_txn[1].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
4812                 assert_ne!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4813         } else {
4814                 assert_eq!(node_txn[1].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
4815                 assert_ne!(node_txn[1].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4816         }
4817
4818         mine_transaction(&nodes[1], &node_txn[1]);
4819         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4820
4821         // Check B's ChannelMonitor was able to generate the right spendable output descriptor
4822         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4823         assert_eq!(spend_txn.len(), 1);
4824         assert_eq!(spend_txn[0].input.len(), 1);
4825         check_spends!(spend_txn[0], node_txn[1]);
4826 }
4827
4828 #[test]
4829 fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
4830         let mut chanmon_cfgs = create_chanmon_cfgs(2);
4831         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
4832         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4833         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4834         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4835
4836         // Create some initial channels
4837         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4838
4839         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4840         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
4841         assert_eq!(revoked_local_txn[0].input.len(), 1);
4842         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4843
4844         // The to-be-revoked commitment tx should have one HTLC and one to_remote output
4845         assert_eq!(revoked_local_txn[0].output.len(), 2);
4846
4847         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4848
4849         // B will generate HTLC-Success from revoked commitment tx
4850         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4851         check_closed_broadcast!(nodes[1], true);
4852         check_added_monitors!(nodes[1], 1);
4853         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4854         let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4855
4856         assert_eq!(revoked_htlc_txn.len(), 1);
4857         assert_eq!(revoked_htlc_txn[0].input.len(), 1);
4858         assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4859         check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
4860
4861         // Check that the unspent output (of the two) on revoked_local_txn[0] is a P2WPKH:
4862         let unspent_local_txn_output = revoked_htlc_txn[0].input[0].previous_output.vout as usize ^ 1;
4863         assert_eq!(revoked_local_txn[0].output[unspent_local_txn_output].script_pubkey.len(), 2 + 20); // P2WPKH
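             // (With exactly two outputs, XOR-ing the spent vout with 1 yields the other output's
             // index.)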
4864
4865         // A will generate justice tx from B's revoked commitment/HTLC tx
4866         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
4867         check_closed_broadcast!(nodes[0], true);
4868         check_added_monitors!(nodes[0], 1);
4869         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4870
4871         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4872         assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success
4873
4874         // The first transaction generated is bogus - it double-spends the HTLC output of
4875         // revoked_local_txn[0], which revoked_htlc_txn[0] already spent, along with
4876         // revoked_htlc_txn[0]'s own output. That's OK, we'll spend with valid transactions next...
4877         assert_eq!(node_txn[0].input.len(), 2);
4878         check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);
4879         if node_txn[0].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
4880                 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4881         } else {
4882                 assert_eq!(node_txn[0].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
4883                 assert_eq!(node_txn[0].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4884         }
4885
4886         assert_eq!(node_txn[1].input.len(), 1);
4887         check_spends!(node_txn[1], revoked_htlc_txn[0]);
4888
4889         mine_transaction(&nodes[0], &node_txn[1]);
4890         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
4891
4892         // Note that the ChannelMonitor shouldn't be generating any new transactions at this point -
4893         // both revoked outputs it can claim were swept by the justice txn mined above.
4894
4895         // Check A's ChannelMonitor was able to generate the right spendable output descriptor
4896         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
4897         assert_eq!(spend_txn.len(), 3);
4898         assert_eq!(spend_txn[0].input.len(), 1);
4899         check_spends!(spend_txn[0], revoked_local_txn[0]); // spending to_remote output from revoked local tx
4900         assert_ne!(spend_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4901         check_spends!(spend_txn[1], node_txn[1]); // spending justice tx output on the htlc success tx
4902         check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[1]); // Both outputs
4903 }
4904
4905 #[test]
4906 fn test_onchain_to_onchain_claim() {
4907         // Test that in case of channel closure, we detect the state of the output and claim the
4908         // HTLC on the downstream peer's remote commitment tx.
4909         // First, have C claim an HTLC against its own latest commitment transaction.
4910         // Then, broadcast these to B, which should update the monitor downstream on the A<->B
4911         // channel.
4912         // Finally, check that B will claim the HTLC output if A's latest commitment transaction
4913         // gets broadcast.
4914
4915         let chanmon_cfgs = create_chanmon_cfgs(3);
4916         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
4917         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
4918         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
4919
4920         // Create some initial channels
4921         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4922         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4923
4924         // Ensure all nodes are at the same height
4925         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
4926         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
4927         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
4928         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
4929
4930         // Rebalance the network a bit by relaying one payment through all the channels ...
4931         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
4932         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
4933
4934         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
4935         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
4936         check_spends!(commitment_tx[0], chan_2.3);
4937         nodes[2].node.claim_funds(payment_preimage);
4938         expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
4939         check_added_monitors!(nodes[2], 1);
4940         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
4941         assert!(updates.update_add_htlcs.is_empty());
4942         assert!(updates.update_fail_htlcs.is_empty());
4943         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
4944         assert!(updates.update_fail_malformed_htlcs.is_empty());
4945
4946         mine_transaction(&nodes[2], &commitment_tx[0]);
4947         check_closed_broadcast!(nodes[2], true);
4948         check_added_monitors!(nodes[2], 1);
4949         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4950
4951         let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx)
4952         assert_eq!(c_txn.len(), 1);
4953         check_spends!(c_txn[0], commitment_tx[0]);
4954         assert_eq!(c_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4955         assert!(c_txn[0].output[0].script_pubkey.is_p2wsh()); // revokeable output
4956         assert_eq!(c_txn[0].lock_time, LockTime::ZERO); // Success tx
4957
4958         // Now that C's commitment tx and HTLC-Success tx have been broadcast on B's chain, B should be able to extract the preimage and update the downstream monitor
4959         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), c_txn[0].clone()]));
4960         check_added_monitors!(nodes[1], 1);
4961         let events = nodes[1].node.get_and_clear_pending_events();
4962         assert_eq!(events.len(), 2);
4963         match events[0] {
4964                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
4965                 _ => panic!("Unexpected event"),
4966         }
4967         match events[1] {
4968                 Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
4969                         next_channel_id, outbound_amount_forwarded_msat, ..
4970                 } => {
4971                         assert_eq!(total_fee_earned_msat, Some(1000));
4972                         assert_eq!(prev_channel_id, Some(chan_1.2));
4973                         assert_eq!(claim_from_onchain_tx, true);
4974                         assert_eq!(next_channel_id, Some(chan_2.2));
4975                         assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
4976                 },
4977                 _ => panic!("Unexpected event"),
4978         }
4979         check_added_monitors!(nodes[1], 1);
4980         let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events();
4981         assert_eq!(msg_events.len(), 3);
4982         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut msg_events);
4983         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut msg_events);
4984
4985         match nodes_2_event {
4986                 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {},
4987                 _ => panic!("Unexpected event"),
4988         }
4989
4990         match nodes_0_event {
4991                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
4992                         assert!(update_add_htlcs.is_empty());
4993                         assert!(update_fail_htlcs.is_empty());
4994                         assert_eq!(update_fulfill_htlcs.len(), 1);
4995                         assert!(update_fail_malformed_htlcs.is_empty());
4996                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
4997                 },
4998                 _ => panic!("Unexpected event"),
4999         };
5000
5001         // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
5002         match msg_events[0] {
5003                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5004                 _ => panic!("Unexpected event"),
5005         }
5006
5007         // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
5008         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
5009         mine_transaction(&nodes[1], &commitment_tx[0]);
5010         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
5011         let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
5012         // ChannelMonitor: HTLC-Success tx
5013         assert_eq!(b_txn.len(), 1);
5014         check_spends!(b_txn[0], commitment_tx[0]);
5015         assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5016         assert!(b_txn[0].output[0].script_pubkey.is_p2wpkh()); // direct payment
5017         assert_eq!(b_txn[0].lock_time.to_consensus_u32(), nodes[1].best_block_info().1); // Success tx
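             // Note the claim's nLockTime is set to the current best height, making it mineable
             // only from the next block onwards - presumably an anti-fee-sniping measure.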
5018
5019         check_closed_broadcast!(nodes[1], true);
5020         check_added_monitors!(nodes[1], 1);
5021 }
5022
5023 #[test]
5024 fn test_duplicate_payment_hash_one_failure_one_success() {
5025         // Topology : A --> B --> C --> D
5026         // We route 2 payments with the same hash between B and C; one will time out, the other will be successfully claimed.
5027         // Note that because C will refuse to generate two payment secrets for the same payment hash,
5028         // we forward one of the payments onwards to D.
5029         let chanmon_cfgs = create_chanmon_cfgs(4);
5030         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
5031         // When this test was written, the default base fee floated based on the HTLC count.
5032         // It is now fixed, so we simply set the fee to the expected value here.
5033         let mut config = test_default_channel_config();
5034         config.channel_config.forwarding_fee_base_msat = 196;
5035         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs,
5036                 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
5037         let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
5038
5039         create_announced_chan_between_nodes(&nodes, 0, 1);
5040         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
5041         create_announced_chan_between_nodes(&nodes, 2, 3);
5042
5043         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
5044         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
5045         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
5046         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
5047         connect_blocks(&nodes[3], node_max_height - nodes[3].best_block_info().1);
5048
5049         let (our_payment_preimage, duplicate_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000);
5050
5051         let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, None).unwrap();
5052         // We reduce the final CLTV here by a somewhat arbitrary constant to keep it under the one-byte
5053         // script push size limit so that the below script length checks match
5054         // ACCEPTED_HTLC_SCRIPT_WEIGHT.
5055         let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV - 40)
5056                 .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap();
5057         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, 800_000);
5058         send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[2], &nodes[3]]], 800_000, duplicate_payment_hash, payment_secret);
5059
5060         let commitment_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
5061         assert_eq!(commitment_txn[0].input.len(), 1);
5062         check_spends!(commitment_txn[0], chan_2.3);
5063
5064         mine_transaction(&nodes[1], &commitment_txn[0]);
5065         check_closed_broadcast!(nodes[1], true);
5066         check_added_monitors!(nodes[1], 1);
5067         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
5068         connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
5069
5070         let htlc_timeout_tx;
5071         { // Extract one of the two HTLC-Timeout transactions
5072                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5073                 // ChannelMonitor: timeout tx * 2-or-3
5074                 assert!(node_txn.len() == 2 || node_txn.len() == 3);
5075
5076                 check_spends!(node_txn[0], commitment_txn[0]);
5077                 assert_eq!(node_txn[0].input.len(), 1);
5078                 assert_eq!(node_txn[0].output.len(), 1);
5079
5080                 if node_txn.len() > 2 {
5081                         check_spends!(node_txn[1], commitment_txn[0]);
5082                         assert_eq!(node_txn[1].input.len(), 1);
5083                         assert_eq!(node_txn[1].output.len(), 1);
5084                         assert_eq!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
5085
5086                         check_spends!(node_txn[2], commitment_txn[0]);
5087                         assert_eq!(node_txn[2].input.len(), 1);
5088                         assert_eq!(node_txn[2].output.len(), 1);
5089                         assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
5090                 } else {
5091                         check_spends!(node_txn[1], commitment_txn[0]);
5092                         assert_eq!(node_txn[1].input.len(), 1);
5093                         assert_eq!(node_txn[1].output.len(), 1);
5094                         assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
5095                 }
5096
5097                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5098                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5099                 // Assign htlc_timeout_tx to the forwarded HTLC (with value ~800 sats). The received HTLC
5100                 // (with value 900 sats) will be claimed in the below `claim_funds` call.
5101                 if node_txn.len() > 2 {
5102                         assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5103                         htlc_timeout_tx = if node_txn[2].output[0].value.to_sat() < 900 { node_txn[2].clone() } else { node_txn[0].clone() };
5104                 } else {
5105                         htlc_timeout_tx = if node_txn[0].output[0].value.to_sat() < 900 { node_txn[1].clone() } else { node_txn[0].clone() };
5106                 }
5107         }
5108
5109         nodes[2].node.claim_funds(our_payment_preimage);
5110         expect_payment_claimed!(nodes[2], duplicate_payment_hash, 900_000);
5111
5112         mine_transaction(&nodes[2], &commitment_txn[0]);
5113         check_added_monitors!(nodes[2], 2);
5114         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5115         let events = nodes[2].node.get_and_clear_pending_msg_events();
5116         match events[0] {
5117                 MessageSendEvent::UpdateHTLCs { .. } => {},
5118                 _ => panic!("Unexpected event"),
5119         }
5120         match events[2] {
5121                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5122                 _ => panic!("Unexpected event"),
5123         }
5124         let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
5125         assert_eq!(htlc_success_txn.len(), 2); // ChannelMonitor: HTLC-Success txn (*2 due to 2-HTLC outputs)
5126         check_spends!(htlc_success_txn[0], commitment_txn[0]);
5127         check_spends!(htlc_success_txn[1], commitment_txn[0]);
5128         assert_eq!(htlc_success_txn[0].input.len(), 1);
5129         assert_eq!(htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5130         assert_eq!(htlc_success_txn[1].input.len(), 1);
5131         assert_eq!(htlc_success_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5132         assert_ne!(htlc_success_txn[0].input[0].previous_output, htlc_success_txn[1].input[0].previous_output);
5133         assert_ne!(htlc_success_txn[1].input[0].previous_output, htlc_timeout_tx.input[0].previous_output);
5134
5135         mine_transaction(&nodes[1], &htlc_timeout_tx);
5136         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5137         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
5138         let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5139         assert!(htlc_updates.update_add_htlcs.is_empty());
5140         assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
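             // Record the failed HTLC's id so we can check below that the later fulfill targets the
             // *other* HTLC carrying the duplicate payment hash.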
5141         let first_htlc_id = htlc_updates.update_fail_htlcs[0].htlc_id;
5142         assert!(htlc_updates.update_fulfill_htlcs.is_empty());
5143         assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
5144         check_added_monitors!(nodes[1], 1);
5145
5146         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
5147         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
5148         {
5149                 commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true);
5150         }
5151         expect_payment_failed_with_update!(nodes[0], duplicate_payment_hash, false, chan_2.0.contents.short_channel_id, true);
5152
5153         // Settle the 2nd HTLC by broadcasting C's HTLC-Success tx on B's chain
5154         mine_transaction(&nodes[1], &htlc_success_txn[1]);
5155         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(196), true, true);
5156         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5157         assert!(updates.update_add_htlcs.is_empty());
5158         assert!(updates.update_fail_htlcs.is_empty());
5159         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
5160         assert_ne!(updates.update_fulfill_htlcs[0].htlc_id, first_htlc_id);
5161         assert!(updates.update_fail_malformed_htlcs.is_empty());
5162         check_added_monitors!(nodes[1], 1);
5163
5164         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
5165         commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false);
5166         expect_payment_sent(&nodes[0], our_payment_preimage, None, true, true);
5167 }
5168
5169 #[test]
5170 fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
5171         let chanmon_cfgs = create_chanmon_cfgs(2);
5172         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5173         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5174         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5175
5176         // Create some initial channels
5177         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5178
5179         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
5180         let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
5181         assert_eq!(local_txn.len(), 1);
5182         assert_eq!(local_txn[0].input.len(), 1);
5183         check_spends!(local_txn[0], chan_1.3);
5184
5185         // Give B knowledge of preimage to be able to generate a local HTLC-Success Tx
5186         nodes[1].node.claim_funds(payment_preimage);
5187         expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
5188         check_added_monitors!(nodes[1], 1);
5189
5190         mine_transaction(&nodes[1], &local_txn[0]);
5191         check_added_monitors!(nodes[1], 1);
5192         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
5193         let events = nodes[1].node.get_and_clear_pending_msg_events();
5194         match events[0] {
5195                 MessageSendEvent::UpdateHTLCs { .. } => {},
5196                 _ => panic!("Unexpected event"),
5197         }
5198         match events[2] {
5199                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5200                 _ => panic!("Unexpected event"),
5201         }
5202         let node_tx = {
5203                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5204                 assert_eq!(node_txn.len(), 1);
5205                 assert_eq!(node_txn[0].input.len(), 1);
5206                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5207                 check_spends!(node_txn[0], local_txn[0]);
5208                 node_txn[0].clone()
5209         };
5210
5211         mine_transaction(&nodes[1], &node_tx);
5212         connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
5213
5214         // Verify that B is able to spend its own HTLC-Success tx thanks to spendable output event given back by its ChannelMonitor
5215         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5216         assert_eq!(spend_txn.len(), 1);
5217         assert_eq!(spend_txn[0].input.len(), 1);
5218         check_spends!(spend_txn[0], node_tx);
5219         assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5220 }
5221
5222 fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, announce_latest: bool) {
5223         // Test that we fail backwards the full set of HTLCs we need to when remote broadcasts an
5224         // unrevoked commitment transaction.
5225         // This includes HTLCs which were below the dust threshold as well as HTLCs which were awaiting
5226         // a remote RAA before they could be failed backwards (and combinations thereof).
5227         // We also test duplicate-hash HTLCs by adding two nodes on each side of the target nodes which
5228         // use the same payment hashes.
5229         // Thus, we use a six-node network:
5230         //
5231         // A \         / E
5232         //    - C - D -
5233         // B /         \ F
5234         // And test where C fails back to A/B when D announces its latest commitment transaction
5235         let chanmon_cfgs = create_chanmon_cfgs(6);
5236         let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
5237         // When this test was written, the default base fee floated based on the HTLC count.
5238         // It is now fixed, so we simply set the fee to the expected value here.
5239         let mut config = test_default_channel_config();
5240         config.channel_config.forwarding_fee_base_msat = 196;
5241         let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs,
5242                 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
5243         let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
5244
5245         let _chan_0_2 = create_announced_chan_between_nodes(&nodes, 0, 2);
5246         let _chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
5247         let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
5248         let chan_3_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
5249         let chan_3_5 = create_announced_chan_between_nodes(&nodes, 3, 5);
5250
5251         // Rebalance and check output sanity...
5252         send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
5253         send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000);
5254         assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
5255
5256         let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
5257                 .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context().holder_dust_limit_satoshis;
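             // HTLCs at or below the holder's dust limit (plus the HTLC-tx fee) don't get their own
             // commitment tx output - the "not added" notes below mark which sends stay dust.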
5258         // 0th HTLC:
5259         let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5260         // 1st HTLC:
5261         let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5262         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
5263         // 2nd HTLC:
5264         send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5265         // 3rd HTLC:
5266         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5267         // 4th HTLC:
5268         let (_, payment_hash_3, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5269         // 5th HTLC:
5270         let (_, payment_hash_4, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5271         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
5272         // 6th HTLC:
5273         send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200, None).unwrap());
5274         // 7th HTLC:
5275         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200, None).unwrap());
5276
5277         // 8th HTLC:
5278         let (_, payment_hash_5, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5279         // 9th HTLC:
5280         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
5281         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5282
5283         // 10th HTLC:
5284         let (_, payment_hash_6, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5285         // 11th HTLC:
5286         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
5287         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200, None).unwrap());
5288
5289         // Double-check that six of the new HTLCs were added
5290         // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (i.e.,
5291         // with to_local and to_remote outputs the commitment tx has 8 outputs; the 6 dust HTLCs get no output).
5292         assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2).len(), 1);
5293         assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 8);
5294
5295         // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
5296         // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
5297         nodes[4].node.fail_htlc_backwards(&payment_hash_1);
5298         nodes[4].node.fail_htlc_backwards(&payment_hash_3);
5299         nodes[4].node.fail_htlc_backwards(&payment_hash_5);
5300         nodes[4].node.fail_htlc_backwards(&payment_hash_6);
5301         check_added_monitors!(nodes[4], 0);
5302
5303         let failed_destinations = vec![
5304                 HTLCDestination::FailedPayment { payment_hash: payment_hash_1 },
5305                 HTLCDestination::FailedPayment { payment_hash: payment_hash_3 },
5306                 HTLCDestination::FailedPayment { payment_hash: payment_hash_5 },
5307                 HTLCDestination::FailedPayment { payment_hash: payment_hash_6 },
5308         ];
5309         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations);
5310         check_added_monitors!(nodes[4], 1);
5311
5312         let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
5313         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[0]);
5314         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[1]);
5315         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[2]);
5316         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[3]);
5317         commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false);
5318
5319         // Fail 3rd below-dust and 7th above-dust HTLCs
5320         nodes[5].node.fail_htlc_backwards(&payment_hash_2);
5321         nodes[5].node.fail_htlc_backwards(&payment_hash_4);
5322         check_added_monitors!(nodes[5], 0);
5323
5324         let failed_destinations_2 = vec![
5325                 HTLCDestination::FailedPayment { payment_hash: payment_hash_2 },
5326                 HTLCDestination::FailedPayment { payment_hash: payment_hash_4 },
5327         ];
5328         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2);
5329         check_added_monitors!(nodes[5], 1);
5330
5331         let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
5332         nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[0]);
5333         nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]);
5334         commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
5335
5336         let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
5337
5338         // After the four removes from nodes[4] and the two from nodes[5] above, nodes[3] should receive 6 HTLCHandlingFailed events
5339         let failed_destinations_3 = vec![
5340                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5341                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5342                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5343                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5344                 HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
5345                 HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
5346         ];
5347         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3);
5348         check_added_monitors!(nodes[3], 1);
5349         let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
5350         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
5351         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[1]);
5352         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[2]);
5353         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[3]);
5354         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[4]);
5355         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[5]);
5356         if deliver_last_raa {
5357                 commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false);
5358         } else {
5359                 let _cs_last_raa = commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false, true, false, true);
5360         }
5361
5362         // D's latest commitment transaction now contains 1st + 2nd + 9th HTLCs (implicitly, they're
5363         // below the dust limit) and the 5th + 6th + 11th HTLCs. It has failed back the 0th, 3rd, 4th,
5364         // 7th, 8th, and 10th, but as we haven't yet delivered the final RAA to C, the fails haven't
5365         // propagated back to A/B yet (and D has two unrevoked commitment transactions).
5366         //
5367         // We now broadcast the latest commitment transaction, which *should* result in failures for
5368         // the 0th, 1st, 2nd, 3rd, 4th, 7th, 8th, 9th, and 10th HTLCs, ie all the below-dust HTLCs and
5369         // the non-broadcast above-dust HTLCs.
5370         //
5371         // Alternatively, we may broadcast the previous commitment transaction, which should only
5372         // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
5373         let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
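	// Sanity-check the dust accounting above: the previous commitment should still contain all
	// six above-dust HTLCs (plus to_local/to_remote), while the latest should contain only the
	// remaining 5th, 6th, and 11th above-dust HTLCs.
	assert_eq!(ds_prev_commitment_tx[0].output.len(), 8);
	assert_eq!(ds_last_commitment_tx[0].output.len(), 5);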
5374
5375         if announce_latest {
5376                 mine_transaction(&nodes[2], &ds_last_commitment_tx[0]);
5377         } else {
5378                 mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
5379         }
5380         let events = nodes[2].node.get_and_clear_pending_events();
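	// If we delivered D's final RAA above, C still has the resulting HTLC events queued, so we
	// expect them here in addition to the ChannelClosed event (which comes last); otherwise only
	// the ChannelClosed event is pending.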
5381         let close_event = if deliver_last_raa {
5382                 assert_eq!(events.len(), 2 + 6);
5383                 events.last().clone().unwrap()
5384         } else {
5385                 assert_eq!(events.len(), 1);
5386                 events.last().clone().unwrap()
5387         };
5388         match close_event {
5389                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
5390                 _ => panic!("Unexpected event"),
5391         }
5392
5393         connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
5394         check_closed_broadcast!(nodes[2], true);
5395         if deliver_last_raa {
5396                 expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true);
5397
5398                 let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
5399                 expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
5400         } else {
5401                 let expected_destinations: Vec<HTLCDestination> = if announce_latest {
5402                         repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect()
5403                 } else {
5404                         repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect()
5405                 };
5406
5407                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations);
5408         }
5409         check_added_monitors!(nodes[2], 3);
5410
5411         let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
5412         assert_eq!(cs_msgs.len(), 2);
5413         let mut a_done = false;
5414         for msg in cs_msgs {
5415                 match msg {
5416                         MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
5417                                 // All below-dust HTLCs should be failed backwards here, plus, if the latest
5418                                 // commitment was broadcast, the above-dust HTLCs we had already failed back.
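				// Note: htlc_id is a per-channel counter assigned in the order HTLCs were offered
				// on that channel, so the globally-numbered HTLCs above map to different ids here
				// (earlier activity on the channel offsets the counter).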
5419                                 let target = if *node_id == nodes[0].node.get_our_node_id() {
5420                                         // If announce_latest, expect 0th, 1st, 4th, 8th, 10th HTLCs, else only 0th, 1st, 10th below-dust HTLCs
5421                                         for htlc in &updates.update_fail_htlcs {
5422                                                 assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 6 || if announce_latest { htlc.htlc_id == 3 || htlc.htlc_id == 5 } else { false });
5423                                         }
5424                                         assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 5 } else { 3 });
5425                                         assert!(!a_done);
5426                                         a_done = true;
5427                                         &nodes[0]
5428                                 } else {
5429                                         // If announce_latest, expect 2nd, 3rd, 7th, 9th HTLCs, else only 2nd, 3rd, 9th below-dust HTLCs
5430                                         for htlc in &updates.update_fail_htlcs {
5431                                                 assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 5 || if announce_latest { htlc.htlc_id == 4 } else { false });
5432                                         }
5433                                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
5434                                         assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 4 } else { 3 });
5435                                         &nodes[1]
5436                                 };
5437                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
5438                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[1]);
5439                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[2]);
5440                                 if announce_latest {
5441                                         target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[3]);
5442                                         if *node_id == nodes[0].node.get_our_node_id() {
5443                                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[4]);
5444                                         }
5445                                 }
5446                                 commitment_signed_dance!(target, nodes[2], updates.commitment_signed, false, true);
5447                         },
5448                         _ => panic!("Unexpected event"),
5449                 }
5450         }
5451
5452         let as_events = nodes[0].node.get_and_clear_pending_events();
5453         assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 });
5454         let mut as_faileds = new_hash_set();
5455         let mut as_updates = 0;
5456         for event in as_events.iter() {
5457                 if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
5458                         assert!(as_faileds.insert(*payment_hash));
5459                         if *payment_hash != payment_hash_2 {
5460                                 assert_eq!(*payment_failed_permanently, deliver_last_raa);
5461                         } else {
5462                                 assert!(!payment_failed_permanently);
5463                         }
5464                         if let PathFailure::OnPath { network_update: Some(_) } = failure {
5465                                 as_updates += 1;
5466                         }
5467                 } else if let &Event::PaymentFailed { .. } = event {
5468                 } else { panic!("Unexpected event"); }
5469         }
5470         assert!(as_faileds.contains(&payment_hash_1));
5471         assert!(as_faileds.contains(&payment_hash_2));
5472         if announce_latest {
5473                 assert!(as_faileds.contains(&payment_hash_3));
5474                 assert!(as_faileds.contains(&payment_hash_5));
5475         }
5476         assert!(as_faileds.contains(&payment_hash_6));
5477
5478         let bs_events = nodes[1].node.get_and_clear_pending_events();
5479         assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 });
5480         let mut bs_faileds = new_hash_set();
5481         let mut bs_updates = 0;
5482         for event in bs_events.iter() {
5483                 if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
5484                         assert!(bs_faileds.insert(*payment_hash));
5485                         if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
5486                                 assert_eq!(*payment_failed_permanently, deliver_last_raa);
5487                         } else {
5488                                 assert!(!payment_failed_permanently);
5489                         }
5490                         if let PathFailure::OnPath { network_update: Some(_) } = failure {
5491                                 bs_updates += 1;
5492                         }
5493                 } else if let &Event::PaymentFailed { .. } = event {
5494                 } else { panic!("Unexpected event"); }
5495         }
5496         assert!(bs_faileds.contains(&payment_hash_1));
5497         assert!(bs_faileds.contains(&payment_hash_2));
5498         if announce_latest {
5499                 assert!(bs_faileds.contains(&payment_hash_4));
5500         }
5501         assert!(bs_faileds.contains(&payment_hash_5));
5502
5503         // For each HTLC which was not failed-back by normal process (ie deliver_last_raa), we should
5504         // get a NetworkUpdate. A should have gotten 4 HTLCs which were failed-back due to
5505         // unknown-preimage-etc, B should have gotten 2. Thus, in the
5506         // announce_latest && deliver_last_raa case, we should have 5-4=1 and 4-2=2 NetworkUpdates.
5507         assert_eq!(as_updates, if deliver_last_raa { 1 } else if !announce_latest { 3 } else { 5 });
5508         assert_eq!(bs_updates, if deliver_last_raa { 2 } else if !announce_latest { 3 } else { 4 });
5509 }
5510
5511 #[test]
5512 fn test_fail_backwards_latest_remote_announce_a() {
5513         do_test_fail_backwards_unrevoked_remote_announce(false, true);
5514 }
5515
5516 #[test]
5517 fn test_fail_backwards_latest_remote_announce_b() {
5518         do_test_fail_backwards_unrevoked_remote_announce(true, true);
5519 }
5520
5521 #[test]
5522 fn test_fail_backwards_previous_remote_announce() {
5523         do_test_fail_backwards_unrevoked_remote_announce(false, false);
5524         // Note that true, false doesn't make sense as it implies we announce a revoked state, which is
5525         // tested for in test_commitment_revoked_fail_backward_exhaustive()
5526 }
5527
5528 #[test]
5529 fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
5530         let chanmon_cfgs = create_chanmon_cfgs(2);
5531         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5532         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5533         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5534
5535         // Create some initial channels
5536         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5537
5538         let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
5539         let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
5540         assert_eq!(local_txn[0].input.len(), 1);
5541         check_spends!(local_txn[0], chan_1.3);
5542
5543         // Time out the HTLC on A's chain so that A can generate an HTLC-Timeout tx
5544         mine_transaction(&nodes[0], &local_txn[0]);
5545         check_closed_broadcast!(nodes[0], true);
5546         check_added_monitors!(nodes[0], 1);
5547         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5548         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
5549
5550         let htlc_timeout = {
5551                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5552                 assert_eq!(node_txn.len(), 1);
5553                 assert_eq!(node_txn[0].input.len(), 1);
5554                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5555                 check_spends!(node_txn[0], local_txn[0]);
5556                 node_txn[0].clone()
5557         };
5558
5559         mine_transaction(&nodes[0], &htlc_timeout);
5560         connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
5561         expect_payment_failed!(nodes[0], our_payment_hash, false);
5562
5563         // Verify that A is able to spend its own HTLC-Timeout tx thanks to the SpendableOutputs event given back by its ChannelMonitor
5564         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5565         assert_eq!(spend_txn.len(), 3);
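	// The three expected sweeps (matching the check_spends below): one spending the commitment
	// tx's CSV-delayed to_local output, one spending the HTLC-Timeout tx's similarly delayed
	// output, and one aggregating both into a single transaction.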
5566         check_spends!(spend_txn[0], local_txn[0]);
5567         assert_eq!(spend_txn[1].input.len(), 1);
5568         check_spends!(spend_txn[1], htlc_timeout);
5569         assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5570         assert_eq!(spend_txn[2].input.len(), 2);
5571         check_spends!(spend_txn[2], local_txn[0], htlc_timeout);
5572         assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
5573                 spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
5574 }
5575
5576 #[test]
5577 fn test_key_derivation_params() {
5578         // This test is a copy of test_dynamic_spendable_outputs_local_htlc_timeout_tx, with a key
5579         // manager rotation to test that `channel_keys_id` returned in
5580         // [`SpendableOutputDescriptor::DelayedPaymentOutput`] lets us re-derive the channel key set
5581         // and then derive a `delayed_payment_key`.
5582
5583         let chanmon_cfgs = create_chanmon_cfgs(3);
5584
5585         // We manually create the node configuration to back up the seed.
5586         let seed = [42; 32];
5587         let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
5588         let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
5589         let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger));
5590         let scorer = RwLock::new(test_utils::TestScorer::new());
5591         let router = test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[0].logger, &scorer);
5592         let message_router = test_utils::TestMessageRouter::new(network_graph.clone(), &keys_manager);
5593         let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, message_router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
5594         let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5595         node_cfgs.remove(0);
5596         node_cfgs.insert(0, node);
5597
5598         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5599         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5600
5601         // Create some initial channels
5602         // Create a dummy channel to advance index by one and thus test re-derivation correctness
5603         // for node 0
5604         let chan_0 = create_announced_chan_between_nodes(&nodes, 0, 2);
5605         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5606         assert_ne!(chan_0.3.output[0].script_pubkey, chan_1.3.output[0].script_pubkey);
5607
5608         // Ensure all nodes are at the same height
5609         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
5610         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
5611         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
5612         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
5613
5614         let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
5615         let local_txn_0 = get_local_commitment_txn!(nodes[0], chan_0.2);
5616         let local_txn_1 = get_local_commitment_txn!(nodes[0], chan_1.2);
5617         assert_eq!(local_txn_1[0].input.len(), 1);
5618         check_spends!(local_txn_1[0], chan_1.3);
5619
5620         // Check that the funding pubkeys are unique
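	// The witness script (witness element 3 of a 2-of-2 P2WSH spend) is
	// `OP_2 <pk1> <pk2> OP_2 OP_CHECKMULTISIG`, so the 33-byte pubkeys sit at bytes [2..35]
	// and [36..69].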
5621         let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][36..69]));
5622         let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][36..69]));
5623         if from_0_funding_key_0 == from_1_funding_key_0
5624             || from_0_funding_key_0 == from_1_funding_key_1
5625             || from_0_funding_key_1 == from_1_funding_key_0
5626             || from_0_funding_key_1 == from_1_funding_key_1 {
5627                 panic!("Funding pubkeys aren't unique");
5628         }
5629
5630         // Time out the HTLC on A's chain so that A can generate an HTLC-Timeout tx
5631         mine_transaction(&nodes[0], &local_txn_1[0]);
5632         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
5633         check_closed_broadcast!(nodes[0], true);
5634         check_added_monitors!(nodes[0], 1);
5635         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5636
5637         let htlc_timeout = {
5638                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5639                 assert_eq!(node_txn.len(), 1);
5640                 assert_eq!(node_txn[0].input.len(), 1);
5641                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5642                 check_spends!(node_txn[0], local_txn_1[0]);
5643                 node_txn[0].clone()
5644         };
5645
5646         mine_transaction(&nodes[0], &htlc_timeout);
5647         connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
5648         expect_payment_failed!(nodes[0], our_payment_hash, false);
5649
5650         // Verify that A is able to spend its own HTLC-Timeout tx thanks to the SpendableOutputs event given back by its ChannelMonitor
5651         let new_keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
5652         let spend_txn = check_spendable_outputs!(nodes[0], new_keys_manager);
5653         assert_eq!(spend_txn.len(), 3);
5654         check_spends!(spend_txn[0], local_txn_1[0]);
5655         assert_eq!(spend_txn[1].input.len(), 1);
5656         check_spends!(spend_txn[1], htlc_timeout);
5657         assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5658         assert_eq!(spend_txn[2].input.len(), 2);
5659         check_spends!(spend_txn[2], local_txn_1[0], htlc_timeout);
5660         assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
5661                 spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
5662 }
5663
5664 #[test]
5665 fn test_static_output_closing_tx() {
5666         let chanmon_cfgs = create_chanmon_cfgs(2);
5667         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5668         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5669         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5670
5671         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5672
5673         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
5674         let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
5675
5676         mine_transaction(&nodes[0], &closing_tx);
5677         check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
5678         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
5679
5680         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5681         assert_eq!(spend_txn.len(), 1);
5682         check_spends!(spend_txn[0], closing_tx);
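	// A cooperative close pays us out directly, with no CSV delay, so a single sweep
	// transaction spending the closing tx should suffice.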
5683
5684         mine_transaction(&nodes[1], &closing_tx);
5685         check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
5686         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5687
5688         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5689         assert_eq!(spend_txn.len(), 1);
5690         check_spends!(spend_txn[0], closing_tx);
5691 }
5692
5693 fn do_htlc_claim_local_commitment_only(use_dust: bool) {
5694         let chanmon_cfgs = create_chanmon_cfgs(2);
5695         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5696         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5697         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5698         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5699
5700         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 });
5701
5702         // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being
5703         // present in B's local commitment transaction and in none of A's commitment transactions.
5704         nodes[1].node.claim_funds(payment_preimage);
5705         check_added_monitors!(nodes[1], 1);
5706         expect_payment_claimed!(nodes[1], payment_hash, if use_dust { 50000 } else { 3_000_000 });
5707
5708         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5709         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
5710         expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
5711
5712         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
5713         check_added_monitors!(nodes[0], 1);
5714         let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5715         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
5716         check_added_monitors!(nodes[1], 1);
5717
5718         let starting_block = nodes[1].best_block_info();
5719         let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
5720         for _ in starting_block.1 + 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + starting_block.1 + 2 {
5721                 connect_block(&nodes[1], &block);
5722                 block.header.prev_blockhash = block.block_hash();
5723         }
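	// Once the HTLC is within CLTV_CLAIM_BUFFER of expiry, B should force-close and, holding
	// the preimage, broadcast an HTLC-Success tx; a dust HTLC has no output to claim, hence
	// HTLCType::NONE in that case.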
5724         test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
5725         check_closed_broadcast!(nodes[1], true);
5726         check_added_monitors!(nodes[1], 1);
5727         check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 100000);
5728 }
5729
5730 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
5731         let chanmon_cfgs = create_chanmon_cfgs(2);
5732         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5733         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5734         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5735         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5736
5737         let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if use_dust { 50000 } else { 3000000 });
5738         nodes[0].node.send_payment_with_route(&route, payment_hash,
5739                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
5740         check_added_monitors!(nodes[0], 1);
5741
5742         let _as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5743
5744         // As far as A is concerned, the HTLC is now present only in the latest remote commitment
5745         // transaction; however, it is not in A's latest local commitment, so A can simply broadcast its
5746         // local commitment to "time out" the HTLC.
5747
5748         let starting_block = nodes[1].best_block_info();
5749         let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
5750
5751         for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 {
5752                 connect_block(&nodes[0], &block);
5753                 block.header.prev_blockhash = block.block_hash();
5754         }
5755         test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
5756         check_closed_broadcast!(nodes[0], true);
5757         check_added_monitors!(nodes[0], 1);
5758         check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
5759 }
5760
5761 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
5762         let chanmon_cfgs = create_chanmon_cfgs(3);
5763         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5764         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5765         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5766         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5767
5768         // Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present
5769         // in B's previous (unrevoked) commitment transaction and in none of A's commitment transactions.
5770         // Also optionally test that we *don't* fail the channel in case the commitment transaction was
5771         // actually revoked.
5772         let htlc_value = if use_dust { 50000 } else { 3000000 };
5773         let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
5774         nodes[1].node.fail_htlc_backwards(&our_payment_hash);
5775         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
5776         check_added_monitors!(nodes[1], 1);
5777
5778         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5779         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
5780         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
5781         check_added_monitors!(nodes[0], 1);
5782         let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5783         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
5784         check_added_monitors!(nodes[1], 1);
5785         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.1);
5786         check_added_monitors!(nodes[1], 1);
5787         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
5788
5789         if check_revoke_no_close {
5790                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
5791                 check_added_monitors!(nodes[0], 1);
5792         }
5793
5794         let starting_block = nodes[1].best_block_info();
5795         let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
5796         for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 {
5797                 connect_block(&nodes[0], &block);
5798                 block.header.prev_blockhash = block.block_hash();
5799         }
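	// If B's RAA was delivered above, the old commitment containing the HTLC has been revoked,
	// so the HTLC can no longer appear in any valid commitment transaction; A need not
	// force-close and should simply fail the payment.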
5800         if !check_revoke_no_close {
5801                 test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
5802                 check_closed_broadcast!(nodes[0], true);
5803                 check_added_monitors!(nodes[0], 1);
5804                 check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
5805         } else {
5806                 expect_payment_failed!(nodes[0], our_payment_hash, true);
5807         }
5808 }
5809
5810 // Test that we close channels on-chain when broadcastable HTLCs reach their timeout window.
5811 // There are only a few cases to test here:
5812 //  * it's not really normative behavior, but we test that below-dust HTLCs "included" in
5813 //    broadcastable commitment transactions result in channel closure,
5814 //  * the HTLC is included in an unrevoked-but-previous remote commitment transaction,
5815 //  * the HTLC is included in the latest remote or local commitment transactions.
5816 // We test each of the three possible commitment transactions individually and use both dust and
5817 // non-dust HTLCs.
5818 // Note that we don't bother testing both outbound and inbound HTLC failures for each case, and we
5819 // assume they are handled the same across all six cases, as both outbound and inbound failures are
5820 // tested for at least one of the cases in other tests.
5821 #[test]
5822 fn htlc_claim_single_commitment_only_a() {
5823         do_htlc_claim_local_commitment_only(true);
5824         do_htlc_claim_local_commitment_only(false);
5825
5826         do_htlc_claim_current_remote_commitment_only(true);
5827         do_htlc_claim_current_remote_commitment_only(false);
5828 }
5829
5830 #[test]
5831 fn htlc_claim_single_commitment_only_b() {
5832         do_htlc_claim_previous_remote_commitment_only(true, false);
5833         do_htlc_claim_previous_remote_commitment_only(false, false);
5834         do_htlc_claim_previous_remote_commitment_only(true, true);
5835         do_htlc_claim_previous_remote_commitment_only(false, true);
5836 }
5837
5838 #[test]
5839 #[should_panic]
5840 fn bolt2_open_channel_sending_node_checks_part1() { // This test needs to be on its own as we are catching a panic
5841         let chanmon_cfgs = create_chanmon_cfgs(2);
5842         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5843         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5844         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5845         // Force duplicate randomness for every get-random call
5846         for node in nodes.iter() {
5847                 *node.keys_manager.override_random_bytes.lock().unwrap() = Some([0; 32]);
5848         }
5849
5850         // BOLT #2 spec: Sending node must ensure temporary_channel_id is unique from any other channel ID with the same peer.
5851         let channel_value_satoshis=10000;
5852         let push_msat=10001;
5853         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
5854         let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5855         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
5856         get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
5857
5858         // Create a second channel with the same random values. This used to panic due to a colliding
5859         // channel_id, but now panics due to a colliding outbound SCID alias.
5860         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
5861 }
5862
5863 #[test]
5864 fn bolt2_open_channel_sending_node_checks_part2() {
5865         let chanmon_cfgs = create_chanmon_cfgs(2);
5866         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5867         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5868         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5869
5870         // BOLT #2 spec: Sending node must set funding_satoshis to less than 2^24 satoshis
5871         let channel_value_satoshis=2^24; // NB: `^` is XOR in Rust, so this is 26 sats rather than 2^24; create_channel rejects it regardless
5872         let push_msat=10001;
5873         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
5874
5875         // BOLT #2 spec: Sending node must set push_msat to equal or less than 1000 * funding_satoshis
5876         let channel_value_satoshis=10000;
5877         // Test when push_msat is one more than 1000 * funding_satoshis.
5878         let push_msat=1000*channel_value_satoshis+1;
5879         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
5880
5881         // BOLT #2 spec: Sending node must set channel_reserve_satoshis greater than or equal to dust_limit_satoshis
5882         let channel_value_satoshis=10000;
5883         let push_msat=10001;
5884         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_ok()); // Create a valid channel
5885         let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5886         assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.common_fields.dust_limit_satoshis);
5887
5888         // BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0
5889         // Only the least-significant bit of channel_flags is currently defined, so channel_flags can only take one of two values: 0 or 1
5890         assert!(node0_to_1_send_open_channel.common_fields.channel_flags<=1);
5891
5892         // BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver.
5893         assert!(BREAKDOWN_TIMEOUT>0);
5894         assert!(node0_to_1_send_open_channel.common_fields.to_self_delay==BREAKDOWN_TIMEOUT);
5895
5896         // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within.
5897         let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
5898         assert_eq!(node0_to_1_send_open_channel.common_fields.chain_hash, chain_hash);
5899
5900         // BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys.
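	// (Round-tripping each basepoint through serialize()/from_slice() checks that it parses as
	// a valid compressed secp256k1 point.)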
5901         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.funding_pubkey.serialize()).is_ok());
5902         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.revocation_basepoint.serialize()).is_ok());
5903         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.htlc_basepoint.serialize()).is_ok());
5904         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.payment_basepoint.serialize()).is_ok());
5905         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.delayed_payment_basepoint.serialize()).is_ok());
5906 }
5907
5908 #[test]
5909 fn bolt2_open_channel_sane_dust_limit() {
5910         let chanmon_cfgs = create_chanmon_cfgs(2);
5911         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5912         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5913         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5914
5915         let channel_value_satoshis=1000000;
5916         let push_msat=10001;
5917         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
5918         let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5919         node0_to_1_send_open_channel.common_fields.dust_limit_satoshis = 547;
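	// 547 sat is one above the 546-sat implementation limit quoted in the error message below.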
5920         node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;
5921
5922         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
5923         let events = nodes[1].node.get_and_clear_pending_msg_events();
5924         let err_msg = match events[0] {
5925                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
5926                         msg.clone()
5927                 },
5928                 _ => panic!("Unexpected event"),
5929         };
5930         assert_eq!(err_msg.data, "dust_limit_satoshis (547) is greater than the implementation limit (546)");
5931 }
5932
5933 // Test that if we fail to send an HTLC that is being freed from the holding cell, and the HTLC
5934 // originated from our node, its failure is surfaced to the user. We trigger this failure to
5935 // free the HTLC by increasing our fee while the HTLC is in the holding cell such that the HTLC
5936 // is no longer affordable once it's freed.
5937 #[test]
5938 fn test_fail_holding_cell_htlc_upon_free() {
5939         let chanmon_cfgs = create_chanmon_cfgs(2);
5940         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5941         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5942         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5943         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
5944
5945         // First nodes[0] generates an update_fee, setting the channel's
5946         // pending_update_fee.
5947         {
5948                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
5949                 *feerate_lock += 20;
5950         }
5951         nodes[0].node.timer_tick_occurred();
5952         check_added_monitors!(nodes[0], 1);
5953
5954         let events = nodes[0].node.get_and_clear_pending_msg_events();
5955         assert_eq!(events.len(), 1);
5956         let (update_msg, commitment_signed) = match events[0] {
5957                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
5958                         (update_fee.as_ref(), commitment_signed)
5959                 },
5960                 _ => panic!("Unexpected event"),
5961         };
5962
5963         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
5964
5965         let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5966         let channel_reserve = chan_stat.channel_reserve_msat;
5967         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
5968         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
5969
5970         // The 2x multiplier and the +1 HTLC in the commit tx fee calculation account for the fee spike buffer.
5971         let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
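	// A's spendable balance is 5_000_000 msat (100_000 sat funded minus 95_000_000 msat pushed),
	// less its reserve and the doubled fee-spike buffer computed above.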
5972         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
5973
5974         // Send a payment which passes reserve checks but gets stuck in the holding cell.
5975         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
5976                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
5977         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5978         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
5979
5980         // Flush the pending fee update.
5981         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
5982         let (as_revoke_and_ack, _) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5983         check_added_monitors!(nodes[1], 1);
5984         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
5985         check_added_monitors!(nodes[0], 1);
5986
5987         // Upon receipt of the RAA, there will be an attempt to resend the holding cell
5988         // HTLC, but now that the fee has been raised the payment will fail, causing
5989         // us to surface its failure to the user.
5990         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5991         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
5992         nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1);
5993
5994         // Check that the payment failed to be sent out.
5995         let events = nodes[0].node.get_and_clear_pending_events();
5996         assert_eq!(events.len(), 2);
5997         match &events[0] {
5998                 &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
5999                         assert_eq!(PaymentId(our_payment_hash.0), *payment_id.as_ref().unwrap());
6000                         assert_eq!(our_payment_hash.clone(), *payment_hash);
6001                         assert_eq!(*payment_failed_permanently, false);
6002                         assert_eq!(*short_channel_id, Some(route.paths[0].hops[0].short_channel_id));
6003                 },
6004                 _ => panic!("Unexpected event"),
6005         }
6006         match &events[1] {
6007                 &Event::PaymentFailed { ref payment_hash, .. } => {
6008                         assert_eq!(our_payment_hash.clone(), *payment_hash);
6009                 },
6010                 _ => panic!("Unexpected event"),
6011         }
6012 }
6013
6014 // Test that if multiple HTLCs are released from the holding cell and one is
6015 // valid but the other is no longer valid upon release, the valid HTLC can be
6016 // successfully completed while the other one fails as expected.
6017 #[test]
6018 fn test_free_and_fail_holding_cell_htlcs() {
6019         let chanmon_cfgs = create_chanmon_cfgs(2);
6020         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6021         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6022         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6023         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6024
6025         // First nodes[0] generates an update_fee, setting the channel's
6026         // pending_update_fee.
6027         {
6028                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
6029                 *feerate_lock += 200;
6030         }
6031         nodes[0].node.timer_tick_occurred();
6032         check_added_monitors!(nodes[0], 1);
6033
6034         let events = nodes[0].node.get_and_clear_pending_msg_events();
6035         assert_eq!(events.len(), 1);
6036         let (update_msg, commitment_signed) = match events[0] {
6037                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
6038                         (update_fee.as_ref(), commitment_signed)
6039                 },
6040                 _ => panic!("Unexpected event"),
6041         };
6042
6043         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
6044
6045         let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6046         let channel_reserve = chan_stat.channel_reserve_msat;
6047         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
6048         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
6049
6050         // The 2x multiplier and the +1 HTLC in the commit tx fee calculation account for the fee spike buffer.
6051         let amt_1 = 20000;
6052         let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features) - amt_1;
6053         let (route_1, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_1);
6054         let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_2);
6055
6056         // Send 2 payments which pass reserve checks but get stuck in the holding cell.
6057         nodes[0].node.send_payment_with_route(&route_1, payment_hash_1,
6058                 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
6059         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6060         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1);
6061         let payment_id_2 = PaymentId(nodes[0].keys_manager.get_secure_random_bytes());
6062         nodes[0].node.send_payment_with_route(&route_2, payment_hash_2,
6063                 RecipientOnionFields::secret_only(payment_secret_2), payment_id_2).unwrap();
6064         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6065         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2);
6066
6067         // Flush the pending fee update.
6068         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
6069         let (revoke_and_ack, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6070         check_added_monitors!(nodes[1], 1);
6071         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_and_ack);
6072         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
6073         check_added_monitors!(nodes[0], 2);
6074
6075         // Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs,
6076         // but now that the fee has been raised the second payment will fail, causing us
6077         // to surface its failure to the user. The first payment should succeed.
6078         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6079         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
6080         nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1);
6081
6082         // Check that the second payment failed to be sent out.
6083         let events = nodes[0].node.get_and_clear_pending_events();
6084         assert_eq!(events.len(), 2);
6085         match &events[0] {
6086                 &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
6087                         assert_eq!(payment_id_2, *payment_id.as_ref().unwrap());
6088                         assert_eq!(payment_hash_2.clone(), *payment_hash);
6089                         assert_eq!(*payment_failed_permanently, false);
6090                         assert_eq!(*short_channel_id, Some(route_2.paths[0].hops[0].short_channel_id));
6091                 },
6092                 _ => panic!("Unexpected event"),
6093         }
6094         match &events[1] {
6095                 &Event::PaymentFailed { ref payment_hash, .. } => {
6096                         assert_eq!(payment_hash_2.clone(), *payment_hash);
6097                 },
6098                 _ => panic!("Unexpected event"),
6099         }
6100
6101         // Complete the first payment and the RAA from the fee update.
6102         let (payment_event, send_raa_event) = {
6103                 let mut msgs = nodes[0].node.get_and_clear_pending_msg_events();
6104                 assert_eq!(msgs.len(), 2);
6105                 (SendEvent::from_event(msgs.remove(0)), msgs.remove(0))
6106         };
6107         let raa = match send_raa_event {
6108                 MessageSendEvent::SendRevokeAndACK { msg, .. } => msg,
6109                 _ => panic!("Unexpected event"),
6110         };
6111         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
6112         check_added_monitors!(nodes[1], 1);
6113         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6114         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6115         let events = nodes[1].node.get_and_clear_pending_events();
6116         assert_eq!(events.len(), 1);
6117         match events[0] {
6118                 Event::PendingHTLCsForwardable { .. } => {},
6119                 _ => panic!("Unexpected event"),
6120         }
6121         nodes[1].node.process_pending_htlc_forwards();
6122         let events = nodes[1].node.get_and_clear_pending_events();
6123         assert_eq!(events.len(), 1);
6124         match events[0] {
6125                 Event::PaymentClaimable { .. } => {},
6126                 _ => panic!("Unexpected event"),
6127         }
6128         nodes[1].node.claim_funds(payment_preimage_1);
6129         check_added_monitors!(nodes[1], 1);
6130         expect_payment_claimed!(nodes[1], payment_hash_1, amt_1);
6131
6132         let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6133         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]);
6134         commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true);
6135         expect_payment_sent!(nodes[0], payment_preimage_1);
6136 }
6137
6138 // Test that if we fail to forward an HTLC that is being freed from the holding cell, the
6139 // HTLC is failed backwards. We trigger this forwarding failure by increasing
6140 // our fee while the HTLC is in the holding cell such that the HTLC is no longer affordable
6141 // once it's freed.
6142 #[test]
6143 fn test_fail_holding_cell_htlc_upon_free_multihop() {
6144         let chanmon_cfgs = create_chanmon_cfgs(3);
6145         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6146         // Avoid having to include routing fees in calculations
6147         let mut config = test_default_channel_config();
6148         config.channel_config.forwarding_fee_base_msat = 0;
6149         config.channel_config.forwarding_fee_proportional_millionths = 0;
6150         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
6151         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6152         let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6153         let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);
6154
6155         // First nodes[1] generates an update_fee, setting the channel's
6156         // pending_update_fee.
6157         {
6158                 let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
6159                 *feerate_lock += 20;
6160         }
6161         nodes[1].node.timer_tick_occurred();
6162         check_added_monitors!(nodes[1], 1);
6163
6164         let events = nodes[1].node.get_and_clear_pending_msg_events();
6165         assert_eq!(events.len(), 1);
6166         let (update_msg, commitment_signed) = match events[0] {
6167                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
6168                         (update_fee.as_ref(), commitment_signed)
6169                 },
6170                 _ => panic!("Unexpected event"),
6171         };
6172
6173         nodes[2].node.handle_update_fee(&nodes[1].node.get_our_node_id(), update_msg.unwrap());
6174
6175         let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan_0_1.2);
6176         let channel_reserve = chan_stat.channel_reserve_msat;
6177         let feerate = get_feerate!(nodes[0], nodes[1], chan_0_1.2);
6178         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_0_1.2);
6179
6180         // Send a payment which passes reserve checks but gets stuck in the holding cell.
6181         let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
6182         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send);
6183         let payment_event = {
6184                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6185                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6186                 check_added_monitors!(nodes[0], 1);
6187
6188                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6189                 assert_eq!(events.len(), 1);
6190
6191                 SendEvent::from_event(events.remove(0))
6192         };
6193         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6194         check_added_monitors!(nodes[1], 0);
6195         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6196         expect_pending_htlcs_forwardable!(nodes[1]);
6197
6198         chan_stat = get_channel_value_stat!(nodes[1], nodes[2], chan_1_2.2);
6199         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
6200
6201         // Flush the pending fee update.
6202         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
6203         let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
6204         check_added_monitors!(nodes[2], 1);
6205         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &raa);
6206         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &commitment_signed);
6207         check_added_monitors!(nodes[1], 2);
6208
6209         // A final RAA message is generated to finalize the fee update.
6210         let events = nodes[1].node.get_and_clear_pending_msg_events();
6211         assert_eq!(events.len(), 1);
6212
6213         let raa_msg = match &events[0] {
6214                 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => {
6215                         msg.clone()
6216                 },
6217                 _ => panic!("Unexpected event"),
6218         };
6219
6220         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_msg);
6221         check_added_monitors!(nodes[2], 1);
6222         assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
6223
6224         // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
6225         let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
6226         assert_eq!(process_htlc_forwards_event.len(), 2);
6227         match &process_htlc_forwards_event[1] {
6228                 &Event::PendingHTLCsForwardable { .. } => {},
6229                 _ => panic!("Unexpected event"),
6230         }
6231
6232         // In response, we call ChannelManager's process_pending_htlc_forwards
6233         nodes[1].node.process_pending_htlc_forwards();
6234         check_added_monitors!(nodes[1], 1);
6235
6236         // This causes the HTLC to be failed backwards.
6237         let fail_event = nodes[1].node.get_and_clear_pending_msg_events();
6238         assert_eq!(fail_event.len(), 1);
6239         let (fail_msg, commitment_signed) = match &fail_event[0] {
6240                 &MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
6241                         assert_eq!(updates.update_add_htlcs.len(), 0);
6242                         assert_eq!(updates.update_fulfill_htlcs.len(), 0);
6243                         assert_eq!(updates.update_fail_malformed_htlcs.len(), 0);
6244                         assert_eq!(updates.update_fail_htlcs.len(), 1);
6245                         (updates.update_fail_htlcs[0].clone(), updates.commitment_signed.clone())
6246                 },
6247                 _ => panic!("Unexpected event"),
6248         };
6249
6250         // Pass the failure messages back to nodes[0].
6251         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
6252         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
6253
6254         // Complete the HTLC failure+removal process.
6255         let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6256         check_added_monitors!(nodes[0], 1);
6257         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
6258         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
6259         check_added_monitors!(nodes[1], 2);
6260         let final_raa_event = nodes[1].node.get_and_clear_pending_msg_events();
6261         assert_eq!(final_raa_event.len(), 1);
6262         let raa = match &final_raa_event[0] {
6263                 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => msg.clone(),
6264                 _ => panic!("Unexpected event"),
6265         };
6266         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
6267         expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_1_2.0.contents.short_channel_id, false);
6268         check_added_monitors!(nodes[0], 1);
6269 }
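
// A minimal sketch, assuming BOLT 3's non-anchor weights, of the affordability
// arithmetic behind `max_can_send` above: the fee for one extra (fee spike
// buffer) HTLC is included and the whole fee is doubled, mirroring
// FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE. The constants and function here are
// illustrative, not the channel.rs internals.
fn illustrative_commit_tx_fee_msat(feerate_per_kw: u64, num_htlcs: u64) -> u64 {
	const BASE_WEIGHT: u64 = 724; // non-anchor commitment tx base weight
	const WEIGHT_PER_HTLC: u64 = 172;
	// Fee in sats is weight * feerate_per_kw / 1000; scale up to msat.
	(BASE_WEIGHT + num_htlcs * WEIGHT_PER_HTLC) * feerate_per_kw / 1000 * 1000
}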
6270
6271 #[test]
6272 fn test_payment_route_reaching_same_channel_twice() {
6273	// A route should not go through the same channel twice.
6274	// This is enforced when constructing a route.
6275         let chanmon_cfgs = create_chanmon_cfgs(2);
6276         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6277         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6278         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6279         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6280
6281         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0)
6282                 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
6283         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000);
6284
6285         // Extend the path by itself, essentially simulating route going through same channel twice
6286         let cloned_hops = route.paths[0].hops.clone();
6287         route.paths[0].hops.extend_from_slice(&cloned_hops);
6288
6289         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6290                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6291         ), false, APIError::InvalidRoute { ref err },
6292         assert_eq!(err, &"Path went through the same channel twice"));
6293 }
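
// A hedged sketch (not the router's actual code) of the duplicate-channel check
// the send path enforces: walk the hops and reject any repeated SCID. With the
// self-extended path above, the second traversal of the channel trips it.
fn path_reuses_channel(hops: &[RouteHop]) -> bool {
	let mut seen = BTreeSet::new();
	hops.iter().any(|hop| !seen.insert(hop.short_channel_id))
}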
6294
6295 // BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message.
6296 // BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve.
6297 //TODO: I don't believe this is explicitly enforced when sending an HTLC, but as the fee aspect of the BOLT specs is in flux, this is left as a TODO.
6298
6299 #[test]
6300 fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
6301         //BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these)
6302         let chanmon_cfgs = create_chanmon_cfgs(2);
6303         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6304         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6305         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6306         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6307
6308         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6309         route.paths[0].hops[0].fee_msat = 100;
6310
6311         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6312                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6313                 ), true, APIError::ChannelUnavailable { .. }, {});
6314         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6315 }
6316
6317 #[test]
6318 fn test_update_add_htlc_bolt2_sender_zero_value_msat() {
6319         //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6320         let chanmon_cfgs = create_chanmon_cfgs(2);
6321         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6322         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6323         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6324         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6325
6326         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6327         route.paths[0].hops[0].fee_msat = 0;
6328         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6329                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)),
6330                 true, APIError::ChannelUnavailable { ref err },
6331                 assert_eq!(err, "Cannot send 0-msat HTLC"));
6332
6333         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6334         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 1);
6335 }
6336
6337 #[test]
6338 fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
6339         //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6340         let chanmon_cfgs = create_chanmon_cfgs(2);
6341         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6342         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6343         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6344         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6345
6346         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6347         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6348                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6349         check_added_monitors!(nodes[0], 1);
6350         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6351         updates.update_add_htlcs[0].amount_msat = 0;
6352
6353         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6354         nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC", 3);
6355         check_closed_broadcast!(nodes[1], true).unwrap();
6356         check_added_monitors!(nodes[1], 1);
6357         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() },
6358                 [nodes[0].node.get_our_node_id()], 100000);
6359 }
6360
6361 #[test]
6362 fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
6363         //BOLT 2 Requirement: MUST set cltv_expiry less than 500000000.
6364         //It is enforced when constructing a route.
6365         let chanmon_cfgs = create_chanmon_cfgs(2);
6366         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6367         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6368         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6369         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6370
6371         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0)
6372                 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
6373         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000);
6374         route.paths[0].hops.last_mut().unwrap().cltv_expiry_delta = 500000001;
6375         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6376                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6377                 ), true, APIError::InvalidRoute { ref err },
6378                 assert_eq!(err, &"Channel CLTV overflowed?"));
6379 }
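
// A hedged sketch of the sender-side CLTV sanity check this test trips: per-hop
// deltas are accumulated with overflow detection, and BOLT 2 reserves final
// values >= 500_000_000 (the nLockTime boundary between block heights and UNIX
// timestamps). Illustration only, not the actual send-path code.
fn total_cltv_ok(hops: &[RouteHop], cur_height: u32) -> bool {
	let mut expiry = cur_height;
	for hop in hops {
		expiry = match expiry.checked_add(hop.cltv_expiry_delta) {
			Some(e) => e,
			None => return false, // "Channel CLTV overflowed?"
		};
	}
	expiry < 500_000_000
}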
6380
6381 #[test]
6382 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() {
6383         //BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC.
6384         //BOLT 2 Requirement: for the first HTLC it offers MUST set id to 0.
6385         //BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer.
6386         let chanmon_cfgs = create_chanmon_cfgs(2);
6387         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6388         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6389         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6390         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6391         let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
6392                 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().counterparty_max_accepted_htlcs as u64;
6393
6394         // Fetch a route in advance as we will be unable to once we're unable to send.
6395         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6396         for i in 0..max_accepted_htlcs {
6397                 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6398                 let payment_event = {
6399                         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6400                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6401                         check_added_monitors!(nodes[0], 1);
6402
6403                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6404                         assert_eq!(events.len(), 1);
6405                         if let MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. }, } = events[0] {
6406                                 assert_eq!(htlcs[0].htlc_id, i);
6407                         } else {
6408				panic!("Unexpected event");
6409                         }
6410                         SendEvent::from_event(events.remove(0))
6411                 };
6412                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6413                 check_added_monitors!(nodes[1], 0);
6414                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6415
6416                 expect_pending_htlcs_forwardable!(nodes[1]);
6417                 expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000);
6418         }
6419         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6420                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6421                 ), true, APIError::ChannelUnavailable { .. }, {});
6422
6423         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6424 }
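
// A hedged sketch of the sender-side bookkeeping the loop above exercises: ids
// start at 0 and increase by one per offered HTLC, and no further HTLC may be
// offered once counterparty_max_accepted_htlcs are in flight. Illustrative
// struct, not LDK's channel state machine.
struct HtlcIdAllocator { next_id: u64, in_flight: u64, max_accepted: u64 }
impl HtlcIdAllocator {
	fn offer(&mut self) -> Option<u64> {
		if self.in_flight >= self.max_accepted { return None; } // send fails
		let id = self.next_id;
		self.next_id += 1;
		self.in_flight += 1;
		Some(id)
	}
}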
6425
6426 #[test]
6427 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
6428         //BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC.
6429         let chanmon_cfgs = create_chanmon_cfgs(2);
6430         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6431         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6432         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6433         let channel_value = 100000;
6434         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0);
6435         let max_in_flight = get_channel_value_stat!(nodes[0], nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat;
6436
6437         send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight);
6438
6439         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_in_flight);
6440	// Manually create a route over our max in flight (which our router normally
6441	// automatically limits us to).
6442	route.paths[0].hops[0].fee_msat = max_in_flight + 1;
6443         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6444                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6445                 ), true, APIError::ChannelUnavailable { .. }, {});
6446         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6447
6448         send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
6449 }
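
// A hedged sketch of the in-flight cap checked above: before offering an HTLC,
// the sender sums its currently-offered HTLCs and refuses the add if the
// counterparty's max_htlc_value_in_flight_msat would be exceeded. Illustration
// only.
fn can_offer_htlc(offered_msat: &[u64], new_htlc_msat: u64, max_in_flight_msat: u64) -> bool {
	offered_msat.iter().sum::<u64>() + new_htlc_msat <= max_in_flight_msat
}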
6450
6451 // BOLT 2 Requirements for the Receiver when handling an update_add_htlc message.
6452 #[test]
6453 fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
6454         //BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel.
6455         let chanmon_cfgs = create_chanmon_cfgs(2);
6456         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6457         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6458         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6459         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6460         let htlc_minimum_msat: u64;
6461         {
6462                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
6463                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
6464                 let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
6465                 htlc_minimum_msat = channel.context().get_holder_htlc_minimum_msat();
6466         }
6467
6468         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
6469         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6470                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6471         check_added_monitors!(nodes[0], 1);
6472         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6473         updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1;
6474         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6475         assert!(nodes[1].node.list_channels().is_empty());
6476         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6477         assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6478         check_added_monitors!(nodes[1], 1);
6479         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6480 }
6481
6482 #[test]
6483 fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
6484         //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel
6485         let chanmon_cfgs = create_chanmon_cfgs(2);
6486         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6487         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6488         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6489         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6490
6491         let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6492         let channel_reserve = chan_stat.channel_reserve_msat;
6493         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
6494         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
6495         // The 2* and +1 are for the fee spike reserve.
6496         let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
6497
6498         let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound;
6499         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
6500         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6501                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6502         check_added_monitors!(nodes[0], 1);
6503         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6504
6505         // Even though channel-initiator senders are required to respect the fee_spike_reserve,
6506         // at this time channel-initiatee receivers are not required to enforce that senders
6507         // respect the fee_spike_reserve.
6508         updates.update_add_htlcs[0].amount_msat = max_can_send + commit_tx_fee_outbound + 1;
6509         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6510
6511         assert!(nodes[1].node.list_channels().is_empty());
6512         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6513         assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
6514         check_added_monitors!(nodes[1], 1);
6515         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6516 }
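
// A hedged sketch of the receiver-side affordability check behind the closure
// above: after adding the HTLC, the funder must still be able to pay the
// commitment fee while keeping its channel reserve. Note the receiver does not
// additionally demand the fee spike buffer the sender budgets for itself.
fn sender_can_afford(sender_balance_msat: u64, htlc_msat: u64,
	commit_tx_fee_msat: u64, sender_reserve_msat: u64) -> bool
{
	sender_balance_msat.saturating_sub(htlc_msat)
		>= commit_tx_fee_msat + sender_reserve_msat
}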
6517
6518 #[test]
6519 fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
6520         //BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel
6521         //BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash.
6522         let chanmon_cfgs = create_chanmon_cfgs(2);
6523         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6524         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6525         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6526         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6527
6528         let send_amt = 3999999;
6529         let (mut route, our_payment_hash, _, our_payment_secret) =
6530                 get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
6531         route.paths[0].hops[0].fee_msat = send_amt;
6532         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
6533         let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
6534         let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap();
6535         let recipient_onion_fields = RecipientOnionFields::secret_only(our_payment_secret);
6536         let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
6537                 &route.paths[0], send_amt, &recipient_onion_fields, cur_height, &None).unwrap();
6538         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
6539
6540         let mut msg = msgs::UpdateAddHTLC {
6541                 channel_id: chan.2,
6542                 htlc_id: 0,
6543                 amount_msat: 1000,
6544                 payment_hash: our_payment_hash,
6545                 cltv_expiry: htlc_cltv,
6546                 onion_routing_packet: onion_packet.clone(),
6547                 skimmed_fee_msat: None,
6548                 blinding_point: None,
6549         };
6550
6551         for i in 0..50 {
6552                 msg.htlc_id = i as u64;
6553                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6554         }
6555	msg.htlc_id = 50;
6556         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6557
6558         assert!(nodes[1].node.list_channels().is_empty());
6559         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6560         assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6561         check_added_monitors!(nodes[1], 1);
6562         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6563 }
6564
6565 #[test]
6566 fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
6567	//BOLT 2 Requirement: if a sending node adds more than its max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction: SHOULD fail the channel.
6568         let chanmon_cfgs = create_chanmon_cfgs(2);
6569         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6570         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6571         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6572         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6573
6574         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6575         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6576                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6577         check_added_monitors!(nodes[0], 1);
6578         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6579         updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat + 1;
6580         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6581
6582         assert!(nodes[1].node.list_channels().is_empty());
6583         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6584         assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
6585         check_added_monitors!(nodes[1], 1);
6586         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 1000000);
6587 }
6588
6589 #[test]
6590 fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
6591         //BOLT2 Requirement: if sending node sets cltv_expiry to greater or equal to 500000000: SHOULD fail the channel.
6592         let chanmon_cfgs = create_chanmon_cfgs(2);
6593         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6594         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6595         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6596
6597         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6598         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6599         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6600                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6601         check_added_monitors!(nodes[0], 1);
6602         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6603         updates.update_add_htlcs[0].cltv_expiry = 500000000;
6604         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6605
6606         assert!(nodes[1].node.list_channels().is_empty());
6607         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6608         assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height");
6609         check_added_monitors!(nodes[1], 1);
6610         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6611 }
6612
6613 #[test]
6614 fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
6615         //BOLT 2 requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection.
6616	// We test this by first checking that repeated HTLCs pass commitment signature checks
6617	// after disconnect, and that non-sequential htlc_ids result in a channel failure.
6618         let chanmon_cfgs = create_chanmon_cfgs(2);
6619         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6620         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6621         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6622
6623         create_announced_chan_between_nodes(&nodes, 0, 1);
6624         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6625         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6626                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6627         check_added_monitors!(nodes[0], 1);
6628         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6629         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6630
6631         //Disconnect and Reconnect
6632         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
6633         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
6634         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
6635                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
6636         }, true).unwrap();
6637         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
6638         assert_eq!(reestablish_1.len(), 1);
6639         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
6640                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
6641         }, false).unwrap();
6642         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
6643         assert_eq!(reestablish_2.len(), 1);
6644         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
6645         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
6646         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
6647         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
6648
6649         //Resend HTLC
6650         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6651         assert_eq!(updates.commitment_signed.htlc_signatures.len(), 1);
6652         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed);
6653         check_added_monitors!(nodes[1], 1);
6654         let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6655
6656         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6657
6658         assert!(nodes[1].node.list_channels().is_empty());
6659         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6660         assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
6661         check_added_monitors!(nodes[1], 1);
6662         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6663 }
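
// A hedged sketch of the receiver rule this test exercises. Un-committed
// incoming HTLCs are forgotten on disconnect, so a retransmission after
// reconnection carries the still-expected id and is simply processed again; any
// other id fails the channel. An illustration of the BOLT 2 rule, not the real
// state machine.
fn incoming_htlc_id_ok(next_expected_id: u64, received_id: u64) -> Result<(), &'static str> {
	if received_id == next_expected_id { Ok(()) } else { Err("Remote skipped HTLC ID") }
}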
6664
6665 #[test]
6666 fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
6667	//BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6668
6669         let chanmon_cfgs = create_chanmon_cfgs(2);
6670         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6671         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6672         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6673         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6674         let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6675         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6676                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6677
6678         check_added_monitors!(nodes[0], 1);
6679         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6680         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6681
6682         let update_msg = msgs::UpdateFulfillHTLC{
6683                 channel_id: chan.2,
6684                 htlc_id: 0,
6685                 payment_preimage: our_payment_preimage,
6686         };
6687
6688         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6689
6690         assert!(nodes[0].node.list_channels().is_empty());
6691         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6692         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6693         check_added_monitors!(nodes[0], 1);
6694         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6695 }
6696
6697 #[test]
6698 fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
6699	//BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6700
6701         let chanmon_cfgs = create_chanmon_cfgs(2);
6702         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6703         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6704         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6705         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6706
6707         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6708         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6709                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6710         check_added_monitors!(nodes[0], 1);
6711         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6712         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6713
6714         let update_msg = msgs::UpdateFailHTLC{
6715                 channel_id: chan.2,
6716                 htlc_id: 0,
6717                 reason: msgs::OnionErrorPacket { data: Vec::new()},
6718         };
6719
6720         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6721
6722         assert!(nodes[0].node.list_channels().is_empty());
6723         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6724         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6725         check_added_monitors!(nodes[0], 1);
6726         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6727 }
6728
6729 #[test]
6730 fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() {
6731	//BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6732
6733         let chanmon_cfgs = create_chanmon_cfgs(2);
6734         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6735         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6736         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6737         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6738
6739         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6740         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6741                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6742         check_added_monitors!(nodes[0], 1);
6743         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6744         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6745         let update_msg = msgs::UpdateFailMalformedHTLC{
6746                 channel_id: chan.2,
6747                 htlc_id: 0,
6748                 sha256_of_onion: [1; 32],
6749                 failure_code: 0x8000,
6750         };
6751
6752         nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6753
6754         assert!(nodes[0].node.list_channels().is_empty());
6755         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6756         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6757         check_added_monitors!(nodes[0], 1);
6758         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6759 }
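
// The three tests above share one BOLT 2 rule: an HTLC may only be resolved
// (fulfilled, failed, or failed-malformed) once it is irrevocably committed in
// both sides' commitment transactions. A hedged sketch of that gate:
fn can_resolve_htlc(committed_by_us: bool, committed_by_remote: bool) -> bool {
	// Resolving any earlier is a protocol error and force-closes the channel.
	committed_by_us && committed_by_remote
}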
6760
6761 #[test]
6762 fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
6763         //BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel.
6764
6765         let chanmon_cfgs = create_chanmon_cfgs(2);
6766         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6767         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6768         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6769         create_announced_chan_between_nodes(&nodes, 0, 1);
6770
6771         let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
6772
6773         nodes[1].node.claim_funds(our_payment_preimage);
6774         check_added_monitors!(nodes[1], 1);
6775         expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);
6776
6777         let events = nodes[1].node.get_and_clear_pending_msg_events();
6778         assert_eq!(events.len(), 1);
6779         let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
6780                 match events[0] {
6781                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6782                                 assert!(update_add_htlcs.is_empty());
6783                                 assert_eq!(update_fulfill_htlcs.len(), 1);
6784                                 assert!(update_fail_htlcs.is_empty());
6785                                 assert!(update_fail_malformed_htlcs.is_empty());
6786                                 assert!(update_fee.is_none());
6787                                 update_fulfill_htlcs[0].clone()
6788                         },
6789                         _ => panic!("Unexpected event"),
6790                 }
6791         };
6792
6793         update_fulfill_msg.htlc_id = 1;
6794
6795         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
6796
6797         assert!(nodes[0].node.list_channels().is_empty());
6798         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6799         assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
6800         check_added_monitors!(nodes[0], 1);
6801         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6802 }
6803
6804 #[test]
6805 fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
6806         //BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel.
6807
6808         let chanmon_cfgs = create_chanmon_cfgs(2);
6809         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6810         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6811         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6812         create_announced_chan_between_nodes(&nodes, 0, 1);
6813
6814         let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
6815
6816         nodes[1].node.claim_funds(our_payment_preimage);
6817         check_added_monitors!(nodes[1], 1);
6818         expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);
6819
6820         let events = nodes[1].node.get_and_clear_pending_msg_events();
6821         assert_eq!(events.len(), 1);
6822         let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
6823                 match events[0] {
6824                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6825                                 assert!(update_add_htlcs.is_empty());
6826                                 assert_eq!(update_fulfill_htlcs.len(), 1);
6827                                 assert!(update_fail_htlcs.is_empty());
6828                                 assert!(update_fail_malformed_htlcs.is_empty());
6829                                 assert!(update_fee.is_none());
6830                                 update_fulfill_htlcs[0].clone()
6831                         },
6832                         _ => panic!("Unexpected event"),
6833                 }
6834         };
6835
6836         update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]);
6837
6838         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
6839
6840         assert!(nodes[0].node.list_channels().is_empty());
6841         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6842         assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
6843         check_added_monitors!(nodes[0], 1);
6844         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6845 }
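
// A hedged sketch (not LDK's internal code) of the BOLT 2 check behind the
// closure above: a fulfill is only valid if the preimage SHA256-hashes to the
// corresponding HTLC's payment_hash.
fn preimage_matches(preimage: &PaymentPreimage, payment_hash: &PaymentHash) -> bool {
	use bitcoin::hashes::sha256;
	sha256::Hash::hash(&preimage.0).to_byte_array() == payment_hash.0
}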
6846
6847 #[test]
6848 fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() {
6849         //BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel.
6850
6851         let chanmon_cfgs = create_chanmon_cfgs(2);
6852         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6853         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6854         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6855         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6856
6857         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6858         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6859                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6860         check_added_monitors!(nodes[0], 1);
6861
6862         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6863         updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
6864
6865         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6866         check_added_monitors!(nodes[1], 0);
6867         commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true);
6868
6869         let events = nodes[1].node.get_and_clear_pending_msg_events();
6870
6871         let mut update_msg: msgs::UpdateFailMalformedHTLC = {
6872                 match events[0] {
6873                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6874                                 assert!(update_add_htlcs.is_empty());
6875                                 assert!(update_fulfill_htlcs.is_empty());
6876                                 assert!(update_fail_htlcs.is_empty());
6877                                 assert_eq!(update_fail_malformed_htlcs.len(), 1);
6878                                 assert!(update_fee.is_none());
6879                                 update_fail_malformed_htlcs[0].clone()
6880                         },
6881                         _ => panic!("Unexpected event"),
6882                 }
6883         };
6884         update_msg.failure_code &= !0x8000;
6885         nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6886
6887         assert!(nodes[0].node.list_channels().is_empty());
6888         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6889         assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
6890         check_added_monitors!(nodes[0], 1);
6891         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 1000000);
6892 }
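
// A hedged sketch of the receiver check above: BOLT 4 requires the BADONION bit
// (0x8000) in the failure_code of every update_fail_malformed_htlc, and a
// missing bit fails the channel. Constant inlined for illustration.
fn malformed_failure_code_ok(failure_code: u16) -> bool {
	const BADONION: u16 = 0x8000;
	failure_code & BADONION != 0
}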
6893
6894 #[test]
6895 fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_update_fail_htlc() {
6896         //BOLT 2 Requirement: a receiving node which has an outgoing HTLC canceled by update_fail_malformed_htlc:
6897         //    * MUST return an error in the update_fail_htlc sent to the link which originally sent the HTLC, using the failure_code given and setting the data to sha256_of_onion.
6898
6899         let chanmon_cfgs = create_chanmon_cfgs(3);
6900         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6901         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
6902         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6903         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6904         let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000);
6905
6906         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
6907
6908         //First hop
6909         let mut payment_event = {
6910                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6911                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6912                 check_added_monitors!(nodes[0], 1);
6913                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6914                 assert_eq!(events.len(), 1);
6915                 SendEvent::from_event(events.remove(0))
6916         };
6917         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6918         check_added_monitors!(nodes[1], 0);
6919         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6920         expect_pending_htlcs_forwardable!(nodes[1]);
6921         let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
6922         assert_eq!(events_2.len(), 1);
6923         check_added_monitors!(nodes[1], 1);
6924         payment_event = SendEvent::from_event(events_2.remove(0));
6925         assert_eq!(payment_event.msgs.len(), 1);
6926
6927         //Second Hop
6928         payment_event.msgs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
6929         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
6930         check_added_monitors!(nodes[2], 0);
6931         commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
6932
6933         let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
6934         assert_eq!(events_3.len(), 1);
6935	let update_msg: (msgs::UpdateFailMalformedHTLC, msgs::CommitmentSigned) = {
6936                 match events_3[0] {
6937                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
6938                                 assert!(update_add_htlcs.is_empty());
6939                                 assert!(update_fulfill_htlcs.is_empty());
6940                                 assert!(update_fail_htlcs.is_empty());
6941                                 assert_eq!(update_fail_malformed_htlcs.len(), 1);
6942                                 assert!(update_fee.is_none());
6943                                 (update_fail_malformed_htlcs[0].clone(), commitment_signed.clone())
6944                         },
6945                         _ => panic!("Unexpected event"),
6946                 }
6947         };
6948
6949         nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg.0);
6950
6951         check_added_monitors!(nodes[1], 0);
6952         commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
6953         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
6954         let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
6955         assert_eq!(events_4.len(), 1);
6956
6957	// Confirm that handling the update_fail_malformed_htlc message produces an update_fail_htlc message to be forwarded back along the route.
6958         match events_4[0] {
6959                 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6960                         assert!(update_add_htlcs.is_empty());
6961                         assert!(update_fulfill_htlcs.is_empty());
6962                         assert_eq!(update_fail_htlcs.len(), 1);
6963                         assert!(update_fail_malformed_htlcs.is_empty());
6964                         assert!(update_fee.is_none());
6965                 },
6966                 _ => panic!("Unexpected event"),
6967         };
6968
6969         check_added_monitors!(nodes[1], 1);
6970 }
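
// A hedged sketch of the back-propagation rule verified above: a hop whose
// outgoing HTLC is canceled via update_fail_malformed_htlc must relay an
// update_fail_htlc upstream built from the given failure_code, with
// sha256_of_onion as the data. Illustrative shape only, not the onion error
// encoding itself.
struct RelayedFailure { failure_code: u16, data: [u8; 32] }
fn relay_malformed_failure(failure_code: u16, sha256_of_onion: [u8; 32]) -> RelayedFailure {
	RelayedFailure { failure_code, data: sha256_of_onion }
}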
6971
6972 #[test]
6973 fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() {
6974         let chanmon_cfgs = create_chanmon_cfgs(3);
6975         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6976         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
6977         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6978         create_announced_chan_between_nodes(&nodes, 0, 1);
6979         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
6980
6981         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100_000);
6982
6983         // First hop
6984         let mut payment_event = {
6985                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6986                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6987                 check_added_monitors!(nodes[0], 1);
6988                 SendEvent::from_node(&nodes[0])
6989         };
6990
6991         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6992         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6993         expect_pending_htlcs_forwardable!(nodes[1]);
6994         check_added_monitors!(nodes[1], 1);
6995         payment_event = SendEvent::from_node(&nodes[1]);
6996         assert_eq!(payment_event.msgs.len(), 1);
6997
6998         // Second Hop
6999         payment_event.msgs[0].onion_routing_packet.version = 1; // Trigger an invalid_onion_version error
7000         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
7001         check_added_monitors!(nodes[2], 0);
7002         commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
7003
7004         let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
7005         assert_eq!(events_3.len(), 1);
7006         match events_3[0] {
7007                 MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
7008                         let mut update_msg = updates.update_fail_malformed_htlcs[0].clone();
7009                         // Set the NODE bit (BADONION and PERM already set in invalid_onion_version error)
7010                         update_msg.failure_code |= 0x2000;
7011
7012                         nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg);
7013                         commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true);
7014                 },
7015                 _ => panic!("Unexpected event"),
7016         }
7017
7018         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
7019                 vec![HTLCDestination::NextHopChannel {
7020                         node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
7021         let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
7022         assert_eq!(events_4.len(), 1);
7023         check_added_monitors!(nodes[1], 1);
7024
7025         match events_4[0] {
7026                 MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
7027                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
7028                         commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
7029                 },
7030                 _ => panic!("Unexpected event"),
7031         }
7032
7033         let events_5 = nodes[0].node.get_and_clear_pending_events();
7034         assert_eq!(events_5.len(), 2);
7035
7036         // Expect a PaymentPathFailed event with a ChannelFailure network update for the channel between
7037         // the node originating the error to its next hop.
7038         match events_5[0] {
7039                 Event::PaymentPathFailed { error_code, failure: PathFailure::OnPath { network_update: Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent }) }, ..
7040                 } => {
7041                         assert_eq!(short_channel_id, chan_2.0.contents.short_channel_id);
7042                         assert!(is_permanent);
7043                         assert_eq!(error_code, Some(0x8000|0x4000|0x2000|4));
7044                 },
7045                 _ => panic!("Unexpected event"),
7046         }
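     	// Illustrative sketch, not part of the original test: BOLT 4 failure codes are flag
     	// bits OR'd with a failure type number, so the error_code asserted above decomposes as:
     	{
     		const BADONION: u16 = 0x8000; // the onion was unparsable (reported via update_fail_malformed_htlc)
     		const PERM: u16 = 0x4000; // the failure is permanent
     		const NODE: u16 = 0x2000; // the failing entity is a node, not a channel
     		const INVALID_ONION_VERSION: u16 = 4; // the failure type number
     		assert_eq!(0x8000|0x4000|0x2000|4, BADONION | PERM | NODE | INVALID_ONION_VERSION);
     	}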
7047         match events_5[1] {
7048                 Event::PaymentFailed { payment_hash, .. } => {
7049                         assert_eq!(payment_hash, our_payment_hash);
7050                 },
7051                 _ => panic!("Unexpected event"),
7052         }
7053
7054         // TODO: Test actual removal of channel from NetworkGraph when it's implemented.
7055 }
7056
7057 fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
7058         // Dust-HTLC failure updates must be delayed until the failure-trigger tx (in this case the local commitment) reaches ANTI_REORG_DELAY confirmations
7059         // We can have at most two valid local commitment txn, so both cases must be covered and both txn must be checked to get all the HTLCs: an
7060         // HTLC may have been removed from the latest local commitment tx but still be valid until we receive the remote RAA
7061
7062         let mut chanmon_cfgs = create_chanmon_cfgs(2);
7063         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
7064         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7065         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7066         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7067         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
7068
7069         let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
7070                 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
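     	// (For context: an HTLC whose value would fall below the holder's dust limit gets no
     	// output on the commitment transaction, so it can never be claimed or timed out
     	// on-chain; `bs_dust_limit * 1000` below converts the sat-denominated limit to msat.)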
7071
7072         // We route 2 dust-HTLCs between A and B
7073         let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7074         let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7075         route_payment(&nodes[0], &[&nodes[1]], 1000000);
7076
7077         // Cache one local commitment tx as previous
7078         let as_prev_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7079
7080         // Fail one HTLC to prune it in the will-be-latest-local commitment tx
7081         nodes[1].node.fail_htlc_backwards(&payment_hash_2);
7082         check_added_monitors!(nodes[1], 0);
7083         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
7084         check_added_monitors!(nodes[1], 1);
7085
7086         let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
7087         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &remove.update_fail_htlcs[0]);
7088         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &remove.commitment_signed);
7089         check_added_monitors!(nodes[0], 1);
7090
7091         // Cache one local commitment tx as latest
7092         let as_last_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7093
7094         let events = nodes[0].node.get_and_clear_pending_msg_events();
7095         match events[0] {
7096                 MessageSendEvent::SendRevokeAndACK { node_id, .. } => {
7097                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
7098                 },
7099                 _ => panic!("Unexpected event"),
7100         }
7101         match events[1] {
7102                 MessageSendEvent::UpdateHTLCs { node_id, .. } => {
7103                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
7104                 },
7105                 _ => panic!("Unexpected event"),
7106         }
7107
7108         assert_ne!(as_prev_commitment_tx, as_last_commitment_tx);
7109         // Fail the 2 dust-HTLCs, moving their failure into the maturation buffer (htlc_updated_waiting_threshold_conf)
7110         if announce_latest {
7111                 mine_transaction(&nodes[0], &as_last_commitment_tx[0]);
7112         } else {
7113                 mine_transaction(&nodes[0], &as_prev_commitment_tx[0]);
7114         }
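     	// Whichever commitment tx confirms, the channel is force-closed and, once that
     	// confirmation is ANTI_REORG_DELAY deep, both dust HTLCs are failed backwards:
     	// having no on-chain output, they cannot be claimed by any timeout or success tx.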
7115
7116         check_closed_broadcast!(nodes[0], true);
7117         check_added_monitors!(nodes[0], 1);
7118         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
7119
7120         assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7121         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7122         let events = nodes[0].node.get_and_clear_pending_events();
7123         // Only 2 PaymentPathFailed events should show up, as the over-dust HTLC has to be failed by its timeout tx
7124         assert_eq!(events.len(), 4);
7125         let mut first_failed = false;
7126         for event in events {
7127                 match event {
7128                         Event::PaymentPathFailed { payment_hash, .. } => {
7129                                 if payment_hash == payment_hash_1 {
7130                                         assert!(!first_failed);
7131                                         first_failed = true;
7132                                 } else {
7133                                         assert_eq!(payment_hash, payment_hash_2);
7134                                 }
7135                         },
7136                         Event::PaymentFailed { .. } => {}
7137                         _ => panic!("Unexpected event"),
7138                 }
7139         }
7140 }
7141
7142 #[test]
7143 fn test_failure_delay_dust_htlc_local_commitment() {
7144         do_test_failure_delay_dust_htlc_local_commitment(true);
7145         do_test_failure_delay_dust_htlc_local_commitment(false);
7146 }
7147
7148 fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
7149         // Outbound HTLC-failure updates must be cancelled if we get a reorg before we reach ANTI_REORG_DELAY.
7150         // Broadcast of revoked remote commitment tx, trigger failure-update of dust/non-dust HTLCs
7151         // Broadcast of remote commitment tx, trigger failure-update of dust-HTLCs
7152         // Broadcast of timeout tx on remote commitment tx, trigger failure-update of non-dust HTLCs
7153         // Broadcast of local commitment tx, trigger failure-update of dust-HTLCs
7154         // Broadcast of HTLC-timeout tx on local commitment tx, trigger failure-update of non-dust HTLCs
7155
7156         let chanmon_cfgs = create_chanmon_cfgs(3);
7157         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
7158         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
7159         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
7160         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
7161
7162         let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
7163                 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
7164
7165         let (_payment_preimage_1, dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7166         let (_payment_preimage_2, non_dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
7167
7168         let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7169         let bs_commitment_tx = get_local_commitment_txn!(nodes[1], chan.2);
7170
7171         // Revoke bs_commitment_tx by making and claiming one more payment
7172         if revoked {
7173                 let (payment_preimage_3, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
7174                 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
7175         }
7176
7177         let mut timeout_tx = Vec::new();
7178         if local {
7179                 // We fail dust-HTLC 1 by broadcast of local commitment tx
7180                 mine_transaction(&nodes[0], &as_commitment_tx[0]);
7181                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
7182                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7183                 expect_payment_failed!(nodes[0], dust_hash, false);
7184
7185                 connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - ANTI_REORG_DELAY);
7186                 check_closed_broadcast!(nodes[0], true);
7187                 check_added_monitors!(nodes[0], 1);
7188                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7189                 timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
7190                 assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
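     		// (The last witness element is the HTLC witness script; its length distinguishes
     		// the script of an HTLC we offered from one we accepted, here matching a timeout
     		// claim of our own offered HTLC.)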
7191                 // We fail non-dust-HTLC 2 by broadcast of local HTLC-timeout tx on local commitment tx
7192                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7193                 mine_transaction(&nodes[0], &timeout_tx[0]);
7194                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7195                 expect_payment_failed!(nodes[0], non_dust_hash, false);
7196         } else {
7197                 // We fail dust-HTLC 1 by broadcast of the remote commitment tx. If revoked, also fail the non-dust HTLC
7198                 mine_transaction(&nodes[0], &bs_commitment_tx[0]);
7199                 check_closed_broadcast!(nodes[0], true);
7200                 check_added_monitors!(nodes[0], 1);
7201                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
7202                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7203
7204                 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
7205                 timeout_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().drain(..)
7206                         .filter(|tx| tx.input[0].previous_output.txid == bs_commitment_tx[0].txid()).collect();
7207                 check_spends!(timeout_tx[0], bs_commitment_tx[0]);
7208                 // Whether the commitment transaction was revoked or not, after ANTI_REORG_DELAY the
7209                 // dust HTLC should have been failed.
7210                 expect_payment_failed!(nodes[0], dust_hash, false);
7211
7212                 if !revoked {
7213                         assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
7214                 } else {
7215                         assert_eq!(timeout_tx[0].lock_time.to_consensus_u32(), 11);
7216                 }
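     		// (In the non-revoked case the claim is our HTLC-timeout spending the accepted-HTLC
     		// script on the remote commitment; in the revoked case it is a justice claim, whose
     		// nLockTime is merely the height at which it was generated (11 here) rather than the
     		// HTLC's CLTV expiry.)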
7217                 // We fail non-dust-HTLC 2 by broadcast of local timeout/revocation-claim tx
7218                 mine_transaction(&nodes[0], &timeout_tx[0]);
7219                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7220                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7221                 expect_payment_failed!(nodes[0], non_dust_hash, false);
7222         }
7223 }
7224
7225 #[test]
7226 fn test_sweep_outbound_htlc_failure_update() {
7227         do_test_sweep_outbound_htlc_failure_update(false, true);
7228         do_test_sweep_outbound_htlc_failure_update(false, false);
7229         do_test_sweep_outbound_htlc_failure_update(true, false);
7230 }
7231
7232 #[test]
7233 fn test_user_configurable_csv_delay() {
7234         // We test that our channel constructors yield errors when we pass them an absurd CSV delay
7235
7236         let mut low_our_to_self_config = UserConfig::default();
7237         low_our_to_self_config.channel_handshake_config.our_to_self_delay = 6;
7238         let mut high_their_to_self_config = UserConfig::default();
7239         high_their_to_self_config.channel_handshake_limits.their_to_self_delay = 100;
7240         let user_cfgs = [Some(high_their_to_self_config.clone()), None];
7241         let chanmon_cfgs = create_chanmon_cfgs(2);
7242         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7243         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
7244         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7245
7246         // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new()
7247         if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7248                 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0,
7249                 &low_our_to_self_config, 0, 42, None)
7250         {
7251                 match error {
7252                         APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
7253                         _ => panic!("Unexpected event"),
7254                 }
7255         } else { assert!(false) }
7256
7257         // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
7258         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
7259         let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
7260         open_channel.common_fields.to_self_delay = 200;
7261         if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7262                 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
7263                 &low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
7264         {
7265                 match error {
7266                         ChannelError::Close((err, _)) => {
7267                                 let regex = regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap();
7268                                 assert!(regex.is_match(err.as_str()));
7269                         },
7270                         _ => panic!("Unexpected event"),
7271                 }
7272         } else { assert!(false); }
7273
7274         // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::accept_channel()
7275         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
7276         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
7277         let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
7278         accept_channel.common_fields.to_self_delay = 200;
7279         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
7280         let reason_msg;
7281         if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
7282                 match action {
7283                         &ErrorAction::SendErrorMessage { ref msg } => {
7284                                 assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str()));
7285                                 reason_msg = msg.data.clone();
7286                         },
7287                         _ => { panic!(); }
7288                 }
7289         } else { panic!(); }
7290         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }, [nodes[1].node.get_our_node_id()], 1000000);
7291
7292         // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
7293         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
7294         let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
7295         open_channel.common_fields.to_self_delay = 200;
7296         if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7297                 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
7298                 &high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
7299         {
7300                 match error {
7301                         ChannelError::Close((err, _)) => {
7302                                 let regex = regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap();
7303                                 assert!(regex.is_match(err.as_str()));
7304                         },
7305                         _ => panic!("Unexpected event"),
7306                 }
7307         } else { assert!(false); }
7308 }
7309
7310 #[test]
7311 fn test_check_htlc_underpaying() {
7312         // Send a payment through A -> B, but A maliciously
7313         // sends a probe payment (i.e. less than the expected value)
7314         // to B; B should refuse the payment.
7315
7316         let chanmon_cfgs = create_chanmon_cfgs(2);
7317         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7318         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7319         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7320
7321         // Create some initial channels
7322         create_announced_chan_between_nodes(&nodes, 0, 1);
7323
7324         let scorer = test_utils::TestScorer::new();
7325         let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7326         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
7327                 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
7328         let route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000);
7329         let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(),
7330                 None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
7331         let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]);
7332         let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None).unwrap();
7333         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
7334                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
7335         check_added_monitors!(nodes[0], 1);
7336
7337         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
7338         assert_eq!(events.len(), 1);
7339         let mut payment_event = SendEvent::from_event(events.pop().unwrap());
7340         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
7341         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
7342
7343         // Note that we first have to wait a random delay before processing the receipt of the HTLC,
7344         // and then will wait a second random delay before failing the HTLC back:
7345         expect_pending_htlcs_forwardable!(nodes[1]);
7346         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
7347
7348         // nodes[1] is expecting a payment of 100_000 msat but received only 10_000,
7349         // so it should fail the HTLC as if it didn't know the preimage.
7350         nodes[1].node.process_pending_htlc_forwards();
7351
7352         let events = nodes[1].node.get_and_clear_pending_msg_events();
7353         assert_eq!(events.len(), 1);
7354         let (update_fail_htlc, commitment_signed) = match events[0] {
7355                 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
7356                         assert!(update_add_htlcs.is_empty());
7357                         assert!(update_fulfill_htlcs.is_empty());
7358                         assert_eq!(update_fail_htlcs.len(), 1);
7359                         assert!(update_fail_malformed_htlcs.is_empty());
7360                         assert!(update_fee.is_none());
7361                         (update_fail_htlcs[0].clone(), commitment_signed)
7362                 },
7363                 _ => panic!("Unexpected event"),
7364         };
7365         check_added_monitors!(nodes[1], 1);
7366
7367         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlc);
7368         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
7369
7370         // 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32
7371         let mut expected_failure_data = (10_000 as u64).to_be_bytes().to_vec();
7372         expected_failure_data.extend_from_slice(&CHAN_CONFIRM_DEPTH.to_be_bytes());
7373         expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000|15, &expected_failure_data[..]);
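     	// (0x4000|15 is PERM|incorrect_or_unknown_payment_details; per BOLT 4 its failure data
     	// is the received htlc_msat as a big-endian u64 followed by the current block height as
     	// a big-endian u32, matching the bytes assembled above.)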
7374 }
7375
7376 #[test]
7377 fn test_announce_disable_channels() {
7378         // Create 3 channels between A and B. Disconnect B. Call timer_tick_occurred and check for generated
7379         // ChannelUpdates. Reconnect B, reestablish, and check that ChannelUpdates re-enabling the channels are generated.
7380
7381         let chanmon_cfgs = create_chanmon_cfgs(2);
7382         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7383         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7384         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7385
7386         // Connect a dummy node for proper broadcasting of future events
7387         connect_dummy_node(&nodes[0]);
7388
7389         create_announced_chan_between_nodes(&nodes, 0, 1);
7390         create_announced_chan_between_nodes(&nodes, 1, 0);
7391         create_announced_chan_between_nodes(&nodes, 0, 1);
7392
7393         // Disconnect peers
7394         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
7395         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
7396
7397         for _ in 0..DISABLE_GOSSIP_TICKS + 1 {
7398                 nodes[0].node.timer_tick_occurred();
7399         }
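     	// (A channel is only announced as disabled once its peer has been gone for a full
     	// DISABLE_GOSSIP_TICKS timer ticks, hence ticking one extra time above.)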
7400         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
7401         assert_eq!(msg_events.len(), 3);
7402         let mut chans_disabled = new_hash_map();
7403         for e in msg_events {
7404                 match e {
7405                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
7406                                 assert_eq!(msg.contents.flags & (1<<1), 1<<1); // The "channel disabled" bit should be set
7407                                 // Check that each channel gets updated exactly once
7408                                 if chans_disabled.insert(msg.contents.short_channel_id, msg.contents.timestamp).is_some() {
7409                                         panic!("Generated ChannelUpdate for wrong chan!");
7410                                 }
7411                         },
7412                         _ => panic!("Unexpected event"),
7413                 }
7414         }
7415         // Reconnect peers
7416         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
7417                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
7418         }, true).unwrap();
7419         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
7420         assert_eq!(reestablish_1.len(), 3);
7421         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
7422                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
7423         }, false).unwrap();
7424         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
7425         assert_eq!(reestablish_2.len(), 3);
7426
7427         // Reestablish chan_1
7428         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
7429         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7430         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
7431         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7432         // Reestablish chan_2
7433         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[1]);
7434         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7435         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[1]);
7436         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7437         // Reestablish chan_3
7438         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[2]);
7439         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7440         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[2]);
7441         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7442
7443         for _ in 0..ENABLE_GOSSIP_TICKS {
7444                 nodes[0].node.timer_tick_occurred();
7445         }
7446         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7447         nodes[0].node.timer_tick_occurred();
7448         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
7449         assert_eq!(msg_events.len(), 3);
7450         for e in msg_events {
7451                 match e {
7452                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
7453                                 assert_eq!(msg.contents.flags & (1<<1), 0); // The "channel disabled" bit should be off
7454                                 match chans_disabled.remove(&msg.contents.short_channel_id) {
7455                                         // Each update should have a higher timestamp than the previous one, replacing
7456                                         // the old one.
7457                                         Some(prev_timestamp) => assert!(msg.contents.timestamp > prev_timestamp),
7458                                         None => panic!("Generated ChannelUpdate for wrong chan!"),
7459                                 }
7460                         },
7461                         _ => panic!("Unexpected event"),
7462                 }
7463         }
7464         // Check that every disabled channel got re-enabled (each entry was consumed above)
7465         assert!(chans_disabled.is_empty());
7466 }
7467
7468 #[test]
7469 fn test_bump_penalty_txn_on_revoked_commitment() {
7470         // In case of penalty txn with too low feerates for getting into mempools, RBF-bump them to be sure
7471         // we're able to claim outputs on the revoked commitment transaction before timelocks expire
7472
7473         let chanmon_cfgs = create_chanmon_cfgs(2);
7474         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7475         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7476         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7477
7478         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7479
7480         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
7481         let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 30)
7482                 .with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap();
7483         let (route,_, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_params, 3000000);
7484         send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000);
7485
7486         let revoked_txn = get_local_commitment_txn!(nodes[0], chan.2);
7487         // Revoked commitment txn with 4 outputs: to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
7488         assert_eq!(revoked_txn[0].output.len(), 4);
7489         assert_eq!(revoked_txn[0].input.len(), 1);
7490         assert_eq!(revoked_txn[0].input[0].previous_output.txid, chan.3.txid());
7491         let revoked_txid = revoked_txn[0].txid();
7492
7493         let mut penalty_sum = 0;
7494         for outp in revoked_txn[0].output.iter() {
7495                 if outp.script_pubkey.is_p2wsh() {
7496                         penalty_sum += outp.value.to_sat();
7497                 }
7498         }
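     	// Illustrative sketch, not part of the original test: the feerate checks below all
     	// follow the same shape, measuring sat per 1000 weight units and requiring each RBF
     	// bump to raise the feerate by at least 25%.
     	let _feerate_per_kwu = |fee_sat: u64, weight_wu: u64| fee_sat * 1000 / weight_wu;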
7499
7500         // Connect blocks to change the height_timer range to see if we use the right soonest_timelock
7501         let header_114 = connect_blocks(&nodes[1], 14);
7502
7503         // Actually revoke the commitment tx by claiming an HTLC
7504         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
7505         connect_block(&nodes[1], &create_dummy_block(header_114, 42, vec![revoked_txn[0].clone()]));
7506         check_added_monitors!(nodes[1], 1);
7507
7508         // One or more justice tx should have been broadcast, check it
7509         let penalty_1;
7510         let feerate_1;
7511         {
7512                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7513                 assert_eq!(node_txn.len(), 1); // justice tx (broadcasted from ChannelMonitor)
7514                 assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7515                 assert_eq!(node_txn[0].output.len(), 1);
7516                 check_spends!(node_txn[0], revoked_txn[0]);
7517                 let fee_1 = penalty_sum - node_txn[0].output[0].value.to_sat();
7518                 feerate_1 = fee_1 * 1000 / node_txn[0].weight().to_wu();
7519                 penalty_1 = node_txn[0].txid();
7520                 node_txn.clear();
7521         };
7522
7523         // After exhaustion of height timer, a new bumped justice tx should have been broadcast, check it
7524         connect_blocks(&nodes[1], 15);
7525         let mut penalty_2 = penalty_1;
7526         let mut feerate_2 = 0;
7527         {
7528                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7529                 assert_eq!(node_txn.len(), 1);
7530                 if node_txn[0].input[0].previous_output.txid == revoked_txid {
7531                         assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7532                         assert_eq!(node_txn[0].output.len(), 1);
7533                         check_spends!(node_txn[0], revoked_txn[0]);
7534                         penalty_2 = node_txn[0].txid();
7535                         // Verify the new bumped tx is different from the last claiming transaction, we don't want spurious rebroadcasts
7536                         assert_ne!(penalty_2, penalty_1);
7537                         let fee_2 = penalty_sum - node_txn[0].output[0].value.to_sat();
7538                         feerate_2 = fee_2 * 1000 / node_txn[0].weight().to_wu();
7539                         // Verify 25% bump heuristic
7540                         assert!(feerate_2 * 100 >= feerate_1 * 125);
7541                         node_txn.clear();
7542                 }
7543         }
7544         assert_ne!(feerate_2, 0);
7545
7546         // After exhaustion of height timer for a 2nd time, a new bumped justice tx should have been broadcast, check it
7547         connect_blocks(&nodes[1], 1);
7548         let penalty_3;
7549         let mut feerate_3 = 0;
7550         {
7551                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7552                 assert_eq!(node_txn.len(), 1);
7553                 if node_txn[0].input[0].previous_output.txid == revoked_txid {
7554                         assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7555                         assert_eq!(node_txn[0].output.len(), 1);
7556                         check_spends!(node_txn[0], revoked_txn[0]);
7557                         penalty_3 = node_txn[0].txid();
7558                         // Verify the new bumped tx is different from the last claiming transaction, we don't want spurious rebroadcasts
7559                         assert_ne!(penalty_3, penalty_2);
7560                         let fee_3 = penalty_sum - node_txn[0].output[0].value.to_sat();
7561                         feerate_3 = fee_3 * 1000 / node_txn[0].weight().to_wu();
7562                         // Verify 25% bump heuristic
7563                         assert!(feerate_3 * 100 >= feerate_2 * 125);
7564                         node_txn.clear();
7565                 }
7566         }
7567         assert_ne!(feerate_3, 0);
7568
7569         nodes[1].node.get_and_clear_pending_events();
7570         nodes[1].node.get_and_clear_pending_msg_events();
7571 }
7572
7573 #[test]
7574 fn test_bump_penalty_txn_on_revoked_htlcs() {
7575         // In case of penalty txn with too low feerates for getting into mempools, RBF-bump them to be sure
7576         // we're able to claim outputs on revoked HTLC transactions before timelocks expire
7577
7578         let mut chanmon_cfgs = create_chanmon_cfgs(2);
7579         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
7580         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7581         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7582         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7583
7584         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7585         // Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps)
7586         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 50).with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
7587         let scorer = test_utils::TestScorer::new();
7588         let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7589         let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
7590         let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(), None,
7591                 nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
7592         let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0;
7593         let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 50)
7594                 .with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap();
7595         let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
7596         let route = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None,
7597                 nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
7598         let failed_payment_hash = send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000).1;
7599
7600         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
7601         assert_eq!(revoked_local_txn[0].input.len(), 1);
7602         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
7603
7604         // Revoke local commitment tx
7605         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
7606
7607         // B will generate both an HTLC-timeout and an HTLC-preimage tx spending the revoked commitment tx
7608         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()]));
7609         check_closed_broadcast!(nodes[1], true);
7610         check_added_monitors!(nodes[1], 1);
7611         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
7612         connect_blocks(&nodes[1], 50); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
7613
7614         let revoked_htlc_txn = {
7615                 let txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
7616                 assert_eq!(txn.len(), 2);
7617
7618                 assert_eq!(txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
7619                 assert_eq!(txn[0].input.len(), 1);
7620                 check_spends!(txn[0], revoked_local_txn[0]);
7621
7622                 assert_eq!(txn[1].input.len(), 1);
7623                 assert_eq!(txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
7624                 assert_eq!(txn[1].output.len(), 1);
7625                 check_spends!(txn[1], revoked_local_txn[0]);
7626
7627                 txn
7628         };
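     	// (txn[0], with an accepted-HTLC witness script, is B's preimage claim on the HTLC it
     	// received; txn[1], with an offered-HTLC witness script, is B's HTLC-timeout on the
     	// HTLC it offered. Both spend the revoked commitment and are themselves claimable by A.)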
7629
7630         // Broadcast set of revoked txn on A
7631         let hash_128 = connect_blocks(&nodes[0], 40);
7632         let block_11 = create_dummy_block(hash_128, 42, vec![revoked_local_txn[0].clone()]);
7633         connect_block(&nodes[0], &block_11);
7634         let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]);
7635         connect_block(&nodes[0], &block_129);
7636         let events = nodes[0].node.get_and_clear_pending_events();
7637         expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]);
7638         match events.last().unwrap() {
7639                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
7640                 _ => panic!("Unexpected event"),
7641         }
7642         let first;
7643         let feerate_1;
7644         let penalty_txn;
7645         {
7646                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7647                 assert_eq!(node_txn.len(), 4); // 3 penalty txn on revoked commitment tx + 1 penalty txn on revoked HTLC txn
7648                 // Verify the claim txn are spending the revoked HTLC txn
7649
7650                 // node_txn 0-2 each spend a separate revoked output from revoked_local_txn[0]
7651                 // Note that node_txn[0] and node_txn[1] are bogus - they double spend the revoked_htlc_txn
7652                 // which are included in the same block (they are broadcasted because we scan the
7653                 // transactions linearly and generate claims as we go, they likely should be removed in the
7654                 // future).
7655                 assert_eq!(node_txn[0].input.len(), 1);
7656                 check_spends!(node_txn[0], revoked_local_txn[0]);
7657                 assert_eq!(node_txn[1].input.len(), 1);
7658                 check_spends!(node_txn[1], revoked_local_txn[0]);
7659                 assert_eq!(node_txn[2].input.len(), 1);
7660                 check_spends!(node_txn[2], revoked_local_txn[0]);
7661
7662                 // Each of the three justice transactions claim a separate (single) output of the three
7663                 // available, which we check here:
7664                 assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
7665                 assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
7666                 assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
7667
7668                 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output);
7669                 assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
7670
7671                 // node_txn[3] spends the revoked outputs from the revoked_htlc_txn (which only have one
7672                 // output, checked above).
7673                 assert_eq!(node_txn[3].input.len(), 2);
7674                 assert_eq!(node_txn[3].output.len(), 1);
7675                 check_spends!(node_txn[3], revoked_htlc_txn[0], revoked_htlc_txn[1]);
7676
7677                 first = node_txn[3].txid();
7678                 // Store both feerates for later comparison
7679                 let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[3].output[0].value;
7680                 feerate_1 = fee_1 * 1000 / node_txn[3].weight().to_wu();
7681                 penalty_txn = vec![node_txn[2].clone()];
7682                 node_txn.clear();
7683         }
7684
7685         // Connect one more block to see if bumped penalty txn are issued for the HTLC txn
7686         let block_130 = create_dummy_block(block_129.block_hash(), 42, penalty_txn);
7687         connect_block(&nodes[0], &block_130);
7688         let block_131 = create_dummy_block(block_130.block_hash(), 42, Vec::new());
7689         connect_block(&nodes[0], &block_131);
7690
7691         // A few more blocks to confirm the penalty txn
7692         connect_blocks(&nodes[0], 4);
7693         assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
7694         let header_144 = connect_blocks(&nodes[0], 9);
7695         let node_txn = {
7696                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7697                 assert_eq!(node_txn.len(), 1);
7698
7699                 assert_eq!(node_txn[0].input.len(), 2);
7700                 check_spends!(node_txn[0], revoked_htlc_txn[0], revoked_htlc_txn[1]);
7701                 // Verify bumped tx is different and 25% bump heuristic
7702                 assert_ne!(first, node_txn[0].txid());
7703                 let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[0].output[0].value;
7704                 let feerate_2 = fee_2 * 1000 / node_txn[0].weight().to_wu();
7705                 assert!(feerate_2 * 100 > feerate_1 * 125);
7706                 let txn = vec![node_txn[0].clone()];
7707                 node_txn.clear();
7708                 txn
7709         };
7710         // Broadcast claim txn and confirm blocks to avoid further bumps on these outputs
7711         connect_block(&nodes[0], &create_dummy_block(header_144, 42, node_txn));
7712         connect_blocks(&nodes[0], 20);
7713         {
7714                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7715                 // We verify that no new transaction has been broadcast because previously
7716                 // we were buggy on this exact behavior: by not tracking remote HTLC outputs for monitoring (see #411),
7717                 // we wouldn't see them spent by a justice tx, and so bumped justice txn
7718                 // were generated forever instead of being safely cleaned up after confirmation plus ANTI_REORG_SAFE_DELAY blocks.
7719                 // Enforce that a claiming transaction spending a revoked HTLC output removes the claim request as expected and dries
7720                 // up bumped justice generation.
7721                 assert_eq!(node_txn.len(), 0);
7722                 node_txn.clear();
7723         }
7724         check_closed_broadcast!(nodes[0], true);
7725         check_added_monitors!(nodes[0], 1);
7726 }
7727
7728 #[test]
7729 fn test_bump_penalty_txn_on_remote_commitment() {
7730         // In case of claim txn with too low feerates for getting into mempools, RBF-bump them to be sure
7731         // we're able to claim outputs on the remote commitment transaction before timelocks expire
7732
7733         // Create 2 HTLCs
7734         // Provide preimage for one
7735         // Check aggregation
7736
7737         let chanmon_cfgs = create_chanmon_cfgs(2);
7738         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7739         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7740         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7741
7742         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7743         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
7744         route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
7745
7746         // Remote commitment txn with 4 outputs: to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
7747         let remote_txn = get_local_commitment_txn!(nodes[0], chan.2);
7748         assert_eq!(remote_txn[0].output.len(), 4);
7749         assert_eq!(remote_txn[0].input.len(), 1);
7750         assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());
7751
7752         // Claim a HTLC without revocation (provide B monitor with preimage)
7753         nodes[1].node.claim_funds(payment_preimage);
7754         expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
7755         mine_transaction(&nodes[1], &remote_txn[0]);
7756         check_added_monitors!(nodes[1], 2);
7757         connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
7758
7759         // One or more claim tx should have been broadcast, check it
7760         let timeout;
7761         let preimage;
7762         let preimage_bump;
7763         let feerate_timeout;
7764         let feerate_preimage;
7765         {
7766                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7767                 // 3 transactions including:
7768                 //   preimage and timeout sweeps from remote commitment + preimage sweep bump
7769                 assert_eq!(node_txn.len(), 3);
7770                 assert_eq!(node_txn[0].input.len(), 1);
7771                 assert_eq!(node_txn[1].input.len(), 1);
7772                 assert_eq!(node_txn[2].input.len(), 1);
7773                 check_spends!(node_txn[0], remote_txn[0]);
7774                 check_spends!(node_txn[1], remote_txn[0]);
7775                 check_spends!(node_txn[2], remote_txn[0]);
7776
7777                 preimage = node_txn[0].txid();
7778                 let index = node_txn[0].input[0].previous_output.vout;
7779                 let fee = remote_txn[0].output[index as usize].value.to_sat() - node_txn[0].output[0].value.to_sat();
7780                 feerate_preimage = fee * 1000 / node_txn[0].weight().to_wu();
7781
7782                 let (preimage_bump_tx, timeout_tx) = if node_txn[2].input[0].previous_output == node_txn[0].input[0].previous_output {
7783                         (node_txn[2].clone(), node_txn[1].clone())
7784                 } else {
7785                         (node_txn[1].clone(), node_txn[2].clone())
7786                 };
7787
7788                 preimage_bump = preimage_bump_tx;
7789                 check_spends!(preimage_bump, remote_txn[0]);
7790                 assert_eq!(node_txn[0].input[0].previous_output, preimage_bump.input[0].previous_output);
7791
7792                 timeout = timeout_tx.txid();
7793                 let index = timeout_tx.input[0].previous_output.vout;
7794                 let fee = remote_txn[0].output[index as usize].value.to_sat() - timeout_tx.output[0].value.to_sat();
7795                 feerate_timeout = fee * 1000 / timeout_tx.weight().to_wu();
7796
7797                 node_txn.clear();
7798         };
7799         assert_ne!(feerate_timeout, 0);
7800         assert_ne!(feerate_preimage, 0);
7801
7802         // After exhaustion of height timer, new bumped claim txn should have been broadcast, check it
7803         connect_blocks(&nodes[1], 1);
7804         {
7805                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7806                 assert_eq!(node_txn.len(), 1);
7807                 assert_eq!(node_txn[0].input.len(), 1);
7808                 assert_eq!(preimage_bump.input.len(), 1);
7809                 check_spends!(node_txn[0], remote_txn[0]);
7810                 check_spends!(preimage_bump, remote_txn[0]);
7811
7812                 let index = preimage_bump.input[0].previous_output.vout;
7813                 let fee = remote_txn[0].output[index as usize].value.to_sat() - preimage_bump.output[0].value.to_sat();
7814                 let new_feerate = fee * 1000 / preimage_bump.weight().to_wu();
7815                 assert!(new_feerate * 100 > feerate_timeout * 125);
7816                 assert_ne!(timeout, preimage_bump.txid());
7817
7818                 let index = node_txn[0].input[0].previous_output.vout;
7819                 let fee = remote_txn[0].output[index as usize].value.to_sat() - node_txn[0].output[0].value.to_sat();
7820                 let new_feerate = fee * 1000 / node_txn[0].weight().to_wu();
7821                 assert!(new_feerate * 100 > feerate_preimage * 125);
7822                 assert_ne!(preimage, node_txn[0].txid());
7823
7824                 node_txn.clear();
7825         }
7826
7827         nodes[1].node.get_and_clear_pending_events();
7828         nodes[1].node.get_and_clear_pending_msg_events();
7829 }
7830
7831 #[test]
7832 fn test_counterparty_raa_skip_no_crash() {
7833         // Previously, if our counterparty sent two RAAs in a row without us having provided a
7834         // commitment transaction, we would have happily carried on and provided them the next
7835         // commitment transaction based on one RAA forward. This would probably eventually have led to
7836         // channel closure, but it would not have resulted in funds loss. Still, our
7837         // TestChannelSigner would have panicked as it doesn't like jumps into the future. Here, we
7838         // check simply that the channel is closed in response to such an RAA, but don't check whether
7839         // we decide to punish our counterparty for revoking their funds (as we don't currently
7840         // implement that).
7841         let chanmon_cfgs = create_chanmon_cfgs(2);
7842         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7843         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7844         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7845         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
7846
7847         let per_commitment_secret;
7848         let next_per_commitment_point;
7849         {
7850                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
7851                 let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
7852                 let keys = guard.channel_by_id.get_mut(&channel_id).map(
7853                         |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
7854                 ).flatten().unwrap().get_signer();
7855
7856                 const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
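     		// (Commitment numbers count down from 2^48 - 1 as the channel advances, so each
     		// successively lower index is a later state whose secret is released on revocation.)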
7857
7858                 // Make signer believe we got a counterparty signature, so that it allows the revocation
7859                 keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
7860                 per_commitment_secret = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
7861
7862                 // Must revoke without gaps
7863                 keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
7864                 keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
7865
7866                 keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
7867                 next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
7868                         &SecretKey::from_slice(&keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
7869         }
7870
7871         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
7872                 &msgs::RevokeAndACK {
7873                         channel_id,
7874                         per_commitment_secret,
7875                         next_per_commitment_point,
7876                         #[cfg(taproot)]
7877                         next_local_nonce: None,
7878                 });
7879         assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
7880         check_added_monitors!(nodes[1], 1);
7881         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() }
7882                 , [nodes[0].node.get_our_node_id()], 100000);
7883 }
7884
7885 #[test]
7886 fn test_bump_txn_sanitize_tracking_maps() {
7887         // Sanitizing pending_claim_requests and claimable_outpoints used to be buggy,
7888         // verify we clean them right after ANTI_REORG_DELAY expires.
7889
7890         let chanmon_cfgs = create_chanmon_cfgs(2);
7891         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7892         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7893         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7894
7895         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7896         // Lock HTLC in both directions
7897         let (payment_preimage_1, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
7898         let (_, payment_hash_2, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
7899
7900         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
7901         assert_eq!(revoked_local_txn[0].input.len(), 1);
7902         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
7903
7904         // Revoke local commitment tx
7905         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
7906
7907         // Broadcast set of revoked txn on A
7908         connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH);
7909         expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
7910         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
7911
7912         mine_transaction(&nodes[0], &revoked_local_txn[0]);
7913         check_closed_broadcast!(nodes[0], true);
7914         check_added_monitors!(nodes[0], 1);
7915         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000);
7916         let penalty_txn = {
7917                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7918                 assert_eq!(node_txn.len(), 3); // ChannelMonitor: justice txn * 3
7919                 check_spends!(node_txn[0], revoked_local_txn[0]);
7920                 check_spends!(node_txn[1], revoked_local_txn[0]);
7921                 check_spends!(node_txn[2], revoked_local_txn[0]);
7922                 let penalty_txn = vec![node_txn[0].clone(), node_txn[1].clone(), node_txn[2].clone()];
7923                 node_txn.clear();
7924                 penalty_txn
7925         };
7926         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, penalty_txn));
7927         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7928         {
7929                 let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(OutPoint { txid: chan.3.txid(), index: 0 }).unwrap();
7930                 assert!(monitor.inner.lock().unwrap().onchain_tx_handler.pending_claim_requests.is_empty());
7931                 assert!(monitor.inner.lock().unwrap().onchain_tx_handler.claimable_outpoints.is_empty());
7932         }
7933 }
7934
7935 #[test]
7936 fn test_channel_conf_timeout() {
7937         // Tests that, for inbound channels, we give up on them if the funding transaction does not
7938         // confirm within 2016 blocks, as recommended by BOLT 2.
7939         let chanmon_cfgs = create_chanmon_cfgs(2);
7940         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7941         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7942         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7943
7944         let _funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 100_000);
7945
7946         // The outbound node should wait forever for confirmation:
7947         // This matches `channel::FUNDING_CONF_DEADLINE_BLOCKS` and BOLT 2's suggested timeout, thus is
7948         // copied here instead of directly referencing the constant.
7949         connect_blocks(&nodes[0], 2016);
7950         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7951
7952         // The inbound node should fail the channel after exactly 2016 blocks
7953         connect_blocks(&nodes[1], 2015);
7954         check_added_monitors!(nodes[1], 0);
7955         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
7956
7957         connect_blocks(&nodes[1], 1);
7958         check_added_monitors!(nodes[1], 1);
7959         check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
7960         let close_ev = nodes[1].node.get_and_clear_pending_msg_events();
7961         assert_eq!(close_ev.len(), 1);
7962         match close_ev[0] {
7963                 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { ref msg }, ref node_id } => {
7964                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
7965                         assert_eq!(msg.as_ref().unwrap().data, "Channel closed because funding transaction failed to confirm within 2016 blocks");
7966                 },
7967                 _ => panic!("Unexpected event"),
7968         }
7969 }
7970
7971 #[test]
7972 fn test_override_channel_config() {
7973         let chanmon_cfgs = create_chanmon_cfgs(2);
7974         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7975         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7976         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7977
7978         // Node0 initiates a channel to node1 using the override config.
7979         let mut override_config = UserConfig::default();
7980         override_config.channel_handshake_config.our_to_self_delay = 200;
7981
7982         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(override_config)).unwrap();
7983
7984         // Assert the channel created by node0 is using the override config.
7985         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7986         assert_eq!(res.common_fields.channel_flags, 0);
7987         assert_eq!(res.common_fields.to_self_delay, 200);
7988 }
7989
7990 #[test]
7991 fn test_override_0msat_htlc_minimum() {
7992         let mut zero_config = UserConfig::default();
7993         zero_config.channel_handshake_config.our_htlc_minimum_msat = 0;
7994         let chanmon_cfgs = create_chanmon_cfgs(2);
7995         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7996         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(zero_config.clone())]);
7997         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7998
7999         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(zero_config)).unwrap();
8000         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8001         assert_eq!(res.common_fields.htlc_minimum_msat, 1);
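             // Note that LDK never advertises a 0 msat minimum: a configured value of 0 is bumped
             // up to 1 msat on both sides of the handshake, as asserted here and below.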
8002
8003         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
8004         let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
8005         assert_eq!(res.common_fields.htlc_minimum_msat, 1);
8006 }
8007
8008 #[test]
8009 fn test_channel_update_has_correct_htlc_maximum_msat() {
8010         // Tests that the `ChannelUpdate` message sets the correct value for `htlc_maximum_msat`.
8011         // BOLT 7 specifies that, if present, `htlc_maximum_msat`:
8012         // 1. MUST be set to less than or equal to the channel capacity. In LDK, this is capped to
8013         // 90% of the `channel_value`.
8014         // 2. MUST be set to less than or equal to the `max_htlc_value_in_flight_msat` received from the peer.
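             //
             // As a worked example for the 100,000 sat channels below: the 90% cap is
             // 100_000 * 1_000 * 0.9 = 90_000_000 msat, so even a peer advertising a 100%
             // max-in-flight limit is announced with an htlc_maximum_msat of 90_000_000.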
8015
8016         let mut config_30_percent = UserConfig::default();
8017         config_30_percent.channel_handshake_config.announced_channel = true;
8018         config_30_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 30;
8019         let mut config_50_percent = UserConfig::default();
8020         config_50_percent.channel_handshake_config.announced_channel = true;
8021         config_50_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50;
8022         let mut config_95_percent = UserConfig::default();
8023         config_95_percent.channel_handshake_config.announced_channel = true;
8024         config_95_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 95;
8025         let mut config_100_percent = UserConfig::default();
8026         config_100_percent.channel_handshake_config.announced_channel = true;
8027         config_100_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
8028
8029         let chanmon_cfgs = create_chanmon_cfgs(4);
8030         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
8031         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[Some(config_30_percent), Some(config_50_percent), Some(config_95_percent), Some(config_100_percent)]);
8032         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
8033
8034         let channel_value_satoshis = 100000;
8035         let channel_value_msat = channel_value_satoshis * 1000;
8036         let channel_value_30_percent_msat = (channel_value_msat as f64 * 0.3) as u64;
8037         let channel_value_50_percent_msat = (channel_value_msat as f64 * 0.5) as u64;
8038         let channel_value_90_percent_msat = (channel_value_msat as f64 * 0.9) as u64;
8039
8040         let (node_0_chan_update, node_1_chan_update, _, _)  = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value_satoshis, 10001);
8041         let (node_2_chan_update, node_3_chan_update, _, _)  = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, channel_value_satoshis, 10001);
8042
8043         // Assert that `node[0]`'s `ChannelUpdate` is capped at 50 percent of the `channel_value`, as
8044         // that's the value of `node[1]`'s `holder_max_htlc_value_in_flight_msat`.
8045         assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, channel_value_50_percent_msat);
8046         // Assert that `node[1]`'s `ChannelUpdate` is capped at 30 percent of the `channel_value`, as
8047         // that's the value of `node[0]`'s `holder_max_htlc_value_in_flight_msat`.
8048         assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, channel_value_30_percent_msat);
8049
8050         // Assert that `node[2]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
8051         // the value of `node[3]`'s `holder_max_htlc_value_in_flight_msat` (100%), exceeds 90% of the
8052         // `channel_value`.
8053         assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
8054         // Assert that `node[3]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
8055         // the value of `node[2]`'s `holder_max_htlc_value_in_flight_msat` (95%), exceeds 90% of the
8056         // `channel_value`.
8057         assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
8058 }
8059
8060 #[test]
8061 fn test_manually_accept_inbound_channel_request() {
8062         let mut manually_accept_conf = UserConfig::default();
8063         manually_accept_conf.manually_accept_inbound_channels = true;
8064         let chanmon_cfgs = create_chanmon_cfgs(2);
8065         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8066         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
8067         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8068
8069         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
8070         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8071
8072         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
8073
8074         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
8075         // accepting the inbound channel request.
8076         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
8077
8078         let events = nodes[1].node.get_and_clear_pending_events();
8079         match events[0] {
8080                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
8081                         nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23).unwrap();
8082                 }
8083                 _ => panic!("Unexpected event"),
8084         }
8085
8086         let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8087         assert_eq!(accept_msg_ev.len(), 1);
8088
8089         match accept_msg_ev[0] {
8090                 MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
8091                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8092                 }
8093                 _ => panic!("Unexpected event"),
8094         }
8095         let error_message = "Channel force-closed";
8096         nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
8097
8098         let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8099         assert_eq!(close_msg_ev.len(), 1);
8100
8101         let events = nodes[1].node.get_and_clear_pending_events();
8102         match events[0] {
8103                 Event::ChannelClosed { user_channel_id, .. } => {
8104                         assert_eq!(user_channel_id, 23);
8105                 }
8106                 _ => panic!("Unexpected event"),
8107         }
8108 }
8109
8110 #[test]
8111 fn test_manually_reject_inbound_channel_request() {
8112         let mut manually_accept_conf = UserConfig::default();
8113         manually_accept_conf.manually_accept_inbound_channels = true;
8114         let chanmon_cfgs = create_chanmon_cfgs(2);
8115         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8116         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
8117         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8118
8119         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
8120         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8121
8122         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
8123
8124         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
8125         // rejecting the inbound channel request.
8126         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
8127         let error_message = "Channel force-closed";
8128         let events = nodes[1].node.get_and_clear_pending_events();
8129         match events[0] {
8130                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
8131                         nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
8132                 }
8133                 _ => panic!("Unexpected event"),
8134         }
8135
8136         let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8137         assert_eq!(close_msg_ev.len(), 1);
8138
8139         match close_msg_ev[0] {
8140                 MessageSendEvent::HandleError { ref node_id, .. } => {
8141                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8142                 }
8143                 _ => panic!("Unexpected event"),
8144         }
8145
8146         // There should be no more events to process, as the channel was never opened.
8147         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
8148 }
8149
8150 #[test]
8151 fn test_can_not_accept_inbound_channel_twice() {
8152         let mut manually_accept_conf = UserConfig::default();
8153         manually_accept_conf.manually_accept_inbound_channels = true;
8154         let chanmon_cfgs = create_chanmon_cfgs(2);
8155         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8156         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
8157         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8158
8159         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
8160         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8161
8162         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
8163
8164         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
8165         // accepting the inbound channel request.
8166         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
8167
8168         let events = nodes[1].node.get_and_clear_pending_events();
8169         match events[0] {
8170                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
8171                         nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
8172                         let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0);
8173                         match api_res {
8174                                 Err(APIError::APIMisuseError { err }) => {
8175                                         assert_eq!(err, "No such channel awaiting to be accepted.");
8176                                 },
8177                                 Ok(_) => panic!("Channel shouldn't be accepted twice"),
8178                                 Err(e) => panic!("Unexpected Error {:?}", e),
8179                         }
8180                 }
8181                 _ => panic!("Unexpected event"),
8182         }
8183
8184         // Ensure that the channel wasn't closed after attempting to accept it twice.
8185         let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8186         assert_eq!(accept_msg_ev.len(), 1);
8187
8188         match accept_msg_ev[0] {
8189                 MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
8190                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8191                 }
8192                 _ => panic!("Unexpected event"),
8193         }
8194 }
8195
8196 #[test]
8197 fn test_can_not_accept_unknown_inbound_channel() {
8198         let chanmon_cfg = create_chanmon_cfgs(2);
8199         let node_cfg = create_node_cfgs(2, &chanmon_cfg);
8200         let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
8201         let nodes = create_network(2, &node_cfg, &node_chanmgr);
8202
8203         let unknown_channel_id = ChannelId::new_zero();
8204         let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0);
8205         match api_res {
8206                 Err(APIError::APIMisuseError { err }) => {
8207                         assert_eq!(err, "No such channel awaiting to be accepted.");
8208                 },
8209                 Ok(_) => panic!("It shouldn't be possible to accept an unknown channel"),
8210                 Err(e) => panic!("Unexpected Error: {:?}", e),
8211         }
8212 }
8213
8214 #[test]
8215 fn test_onion_value_mpp_set_calculation() {
8216         // Test that we use the onion value `amt_to_forward` when
8217         // calculating whether we've reached the `total_msat` of an MPP
8218         // by having a routing node forward more than `amt_to_forward`
8219         // and checking that the receiving node doesn't generate
8220         // a PaymentClaimable event too early.
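             //
             // Concretely: the first path's onion claims a sender-intended amount of 99,000 msat
             // even though 100,000 msat is actually forwarded, so the recipient must wait for the
             // second path's 1,000 msat before considering the 100,000 msat total reached.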
8221         let node_count = 4;
8222         let chanmon_cfgs = create_chanmon_cfgs(node_count);
8223         let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
8224         let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
8225         let mut nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
8226
8227         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8228         let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
8229         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
8230         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
8231
8232         let total_msat = 100_000;
8233         let expected_paths: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]];
8234         let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], total_msat);
8235         let sample_path = route.paths.pop().unwrap();
8236
8237         let mut path_1 = sample_path.clone();
8238         path_1.hops[0].pubkey = nodes[1].node.get_our_node_id();
8239         path_1.hops[0].short_channel_id = chan_1_id;
8240         path_1.hops[1].pubkey = nodes[3].node.get_our_node_id();
8241         path_1.hops[1].short_channel_id = chan_3_id;
8242         path_1.hops[1].fee_msat = 100_000;
8243         route.paths.push(path_1);
8244
8245         let mut path_2 = sample_path.clone();
8246         path_2.hops[0].pubkey = nodes[2].node.get_our_node_id();
8247         path_2.hops[0].short_channel_id = chan_2_id;
8248         path_2.hops[1].pubkey = nodes[3].node.get_our_node_id();
8249         path_2.hops[1].short_channel_id = chan_4_id;
8250         path_2.hops[1].fee_msat = 1_000;
8251         route.paths.push(path_2);
8252
8253         // Send payment
8254         let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
8255         let onion_session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
8256                 RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap();
8257         nodes[0].node.test_send_payment_internal(&route, our_payment_hash,
8258                 RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
8259         check_added_monitors!(nodes[0], expected_paths.len());
8260
8261         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8262         assert_eq!(events.len(), expected_paths.len());
8263
8264         // First path
8265         let ev = remove_first_msg_event_to_node(&expected_paths[0][0].node.get_our_node_id(), &mut events);
8266         let mut payment_event = SendEvent::from_event(ev);
8267         let mut prev_node = &nodes[0];
8268
8269         for (idx, &node) in expected_paths[0].iter().enumerate() {
8270                 assert_eq!(node.node.get_our_node_id(), payment_event.node_id);
8271
8272                 if idx == 0 { // routing node
8273                         let session_priv = [3; 32];
8274                         let height = nodes[0].best_block_info().1;
8275                         let session_priv = SecretKey::from_slice(&session_priv).unwrap();
8276                         let mut onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
8277                         let recipient_onion_fields = RecipientOnionFields::secret_only(our_payment_secret);
8278                         let (mut onion_payloads, _, _) = onion_utils::build_onion_payloads(&route.paths[0], 100_000,
8279                                 &recipient_onion_fields, height + 1, &None).unwrap();
8280                         // Edit amt_to_forward to simulate the sender having set
8281                         // the final amount and the routing node taking less fee
8282                         if let msgs::OutboundOnionPayload::Receive {
8283                                 ref mut sender_intended_htlc_amt_msat, ..
8284                         } = onion_payloads[1] {
8285                                 *sender_intended_htlc_amt_msat = 99_000;
8286                         } else { panic!() }
8287                         let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
8288                         payment_event.msgs[0].onion_routing_packet = new_onion_packet;
8289                 }
8290
8291                 node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]);
8292                 check_added_monitors!(node, 0);
8293                 commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
8294                 expect_pending_htlcs_forwardable!(node);
8295
8296                 if idx == 0 {
8297                         let mut events_2 = node.node.get_and_clear_pending_msg_events();
8298                         assert_eq!(events_2.len(), 1);
8299                         check_added_monitors!(node, 1);
8300                         payment_event = SendEvent::from_event(events_2.remove(0));
8301                         assert_eq!(payment_event.msgs.len(), 1);
8302                 } else {
8303                         let events_2 = node.node.get_and_clear_pending_events();
8304                         assert!(events_2.is_empty());
8305                 }
8306
8307                 prev_node = node;
8308         }
8309
8310         // Second path
8311         let ev = remove_first_msg_event_to_node(&expected_paths[1][0].node.get_our_node_id(), &mut events);
8312         pass_along_path(&nodes[0], expected_paths[1], 101_000, our_payment_hash.clone(), Some(our_payment_secret), ev, true, None);
8313
8314         claim_payment_along_route(
8315                 ClaimAlongRouteArgs::new(&nodes[0], expected_paths, our_payment_preimage)
8316         );
8317 }
8318
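     // Sends an MPP whose per-path amounts deliberately sum to more than `total_msat`, checking
     // that the recipient treats the payment as claimable exactly once at least `total_msat` has
     // arrived.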
8319 fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) {
8320
8321         let routing_node_count = msat_amounts.len();
8322         let node_count = routing_node_count + 2;
8323
8324         let chanmon_cfgs = create_chanmon_cfgs(node_count);
8325         let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
8326         let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
8327         let nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
8328
8329         let src_idx = 0;
8330         let dst_idx = 1;
8331
8332         // Create channels for each amount
8333         let mut expected_paths = Vec::with_capacity(routing_node_count);
8334         let mut src_chan_ids = Vec::with_capacity(routing_node_count);
8335         let mut dst_chan_ids = Vec::with_capacity(routing_node_count);
8336         for i in 0..routing_node_count {
8337                 let routing_node = 2 + i;
8338                 let src_chan_id = create_announced_chan_between_nodes(&nodes, src_idx, routing_node).0.contents.short_channel_id;
8339                 src_chan_ids.push(src_chan_id);
8340                 let dst_chan_id = create_announced_chan_between_nodes(&nodes, routing_node, dst_idx).0.contents.short_channel_id;
8341                 dst_chan_ids.push(dst_chan_id);
8342                 let path = vec![&nodes[routing_node], &nodes[dst_idx]];
8343                 expected_paths.push(path);
8344         }
8345         let expected_paths: Vec<&[&Node]> = expected_paths.iter().map(|route| route.as_slice()).collect();
8346
8347         // Create a route for each amount
8348         let example_amount = 100000;
8349         let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[src_idx], nodes[dst_idx], example_amount);
8350         let sample_path = route.paths.pop().unwrap();
8351         for i in 0..routing_node_count {
8352                 let routing_node = 2 + i;
8353                 let mut path = sample_path.clone();
8354                 path.hops[0].pubkey = nodes[routing_node].node.get_our_node_id();
8355                 path.hops[0].short_channel_id = src_chan_ids[i];
8356                 path.hops[1].pubkey = nodes[dst_idx].node.get_our_node_id();
8357                 path.hops[1].short_channel_id = dst_chan_ids[i];
8358                 path.hops[1].fee_msat = msat_amounts[i];
8359                 route.paths.push(path);
8360         }
8361
8362         // Send payment with manually set total_msat
8363         let payment_id = PaymentId(nodes[src_idx].keys_manager.backing.get_secure_random_bytes());
8364         let onion_session_privs = nodes[src_idx].node.test_add_new_pending_payment(our_payment_hash,
8365                 RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap();
8366         nodes[src_idx].node.test_send_payment_internal(&route, our_payment_hash,
8367                 RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
8368         check_added_monitors!(nodes[src_idx], expected_paths.len());
8369
8370         let mut events = nodes[src_idx].node.get_and_clear_pending_msg_events();
8371         assert_eq!(events.len(), expected_paths.len());
8372         let mut amount_received = 0;
8373         for (path_idx, expected_path) in expected_paths.iter().enumerate() {
8374                 let ev = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &mut events);
8375
8376                 let current_path_amount = msat_amounts[path_idx];
8377                 amount_received += current_path_amount;
8378                 let became_claimable_now = amount_received >= total_msat && amount_received - current_path_amount < total_msat;
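                     // i.e. claimability flips exactly on the path that pushes the running total
                     // past `total_msat`.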
8379                 pass_along_path(&nodes[src_idx], expected_path, amount_received, our_payment_hash.clone(), Some(our_payment_secret), ev, became_claimable_now, None);
8380         }
8381
8382         claim_payment_along_route(
8383                 ClaimAlongRouteArgs::new(&nodes[src_idx], &expected_paths, our_payment_preimage)
8384         );
8385 }
8386
8387 #[test]
8388 fn test_overshoot_mpp() {
8389         do_test_overshoot_mpp(&[100_000, 101_000], 200_000);
8390         do_test_overshoot_mpp(&[100_000, 10_000, 100_000], 200_000);
8391 }
8392
8393 #[test]
8394 fn test_simple_mpp() {
8395         // Simple test of sending a multi-path payment.
8396         let chanmon_cfgs = create_chanmon_cfgs(4);
8397         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
8398         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
8399         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
8400
8401         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8402         let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
8403         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
8404         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
8405
8406         let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
8407         let path = route.paths[0].clone();
8408         route.paths.push(path);
8409         route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
8410         route.paths[0].hops[0].short_channel_id = chan_1_id;
8411         route.paths[0].hops[1].short_channel_id = chan_3_id;
8412         route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
8413         route.paths[1].hops[0].short_channel_id = chan_2_id;
8414         route.paths[1].hops[1].short_channel_id = chan_4_id;
8415         send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
8416         claim_payment_along_route(
8417                 ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], payment_preimage)
8418         );
8419 }
8420
8421 #[test]
8422 fn test_preimage_storage() {
8423         // Simple test of payment preimage storage, allowing payments to be claimed with no client-side storage
8424         let chanmon_cfgs = create_chanmon_cfgs(2);
8425         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8426         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8427         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8428
8429         create_announced_chan_between_nodes(&nodes, 0, 1);
8430
8431         {
8432                 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200, None).unwrap();
8433                 let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
8434                 nodes[0].node.send_payment_with_route(&route, payment_hash,
8435                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
8436                 check_added_monitors!(nodes[0], 1);
8437                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8438                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
8439                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8440                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8441         }
8442         // Note that after leaving the above scope we have no knowledge of any arguments or return
8443         // values from previous calls.
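             // Instead, the preimage must be recovered from the PaymentClaimable event's purpose
             // below, demonstrating that claiming requires no per-payment client-side storage.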
8444         expect_pending_htlcs_forwardable!(nodes[1]);
8445         let events = nodes[1].node.get_and_clear_pending_events();
8446         assert_eq!(events.len(), 1);
8447         match events[0] {
8448                 Event::PaymentClaimable { ref purpose, .. } => {
8449                         match &purpose {
8450                                 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, .. } => {
8451                                         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap());
8452                                 },
8453                                 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
8454                         }
8455                 },
8456                 _ => panic!("Unexpected event"),
8457         }
8458 }
8459
8460 #[test]
8461 fn test_bad_secret_hash() {
8462         // Simple test of unregistered payment hash/invalid payment secret handling
8463         let chanmon_cfgs = create_chanmon_cfgs(2);
8464         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8465         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8466         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8467
8468         create_announced_chan_between_nodes(&nodes, 0, 1);
8469
8470         let random_payment_hash = PaymentHash([42; 32]);
8471         let random_payment_secret = PaymentSecret([43; 32]);
8472         let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2, None).unwrap();
8473         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
8474
8475         // All the below cases should end up being handled exactly identically, so we macro the
8476         // resulting events.
8477         macro_rules! handle_unknown_invalid_payment_data {
8478                 ($payment_hash: expr) => {
8479                         check_added_monitors!(nodes[0], 1);
8480                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8481                         let payment_event = SendEvent::from_event(events.pop().unwrap());
8482                         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8483                         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8484
8485                         // We have to forward pending HTLCs once to process the receipt of the HTLC and then
8486                         // again to process the pending backwards-failure of the HTLC
8487                         expect_pending_htlcs_forwardable!(nodes[1]);
8488                         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment{ payment_hash: $payment_hash }]);
8489                         check_added_monitors!(nodes[1], 1);
8490
8491                         // We should fail the payment back
8492                         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
8493                         match events.pop().unwrap() {
8494                                 MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } } => {
8495                                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
8496                                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
8497                                 },
8498                                 _ => panic!("Unexpected event"),
8499                         }
8500                 }
8501         }
8502
8503         let expected_error_code = 0x4000|15; // incorrect_or_unknown_payment_details
8504         // Error data is the HTLC value (100,000) and current block height
8505         let expected_error_data = [0, 0, 0, 0, 0, 1, 0x86, 0xa0, 0, 0, 0, CHAN_CONFIRM_DEPTH as u8];
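             // (0x4000 is the PERM bit; per BOLT 4, the data for incorrect_or_unknown_payment_details
             // is the HTLC amount as a big-endian u64 (100,000 = 0x0186a0) followed by the current
             // block height as a big-endian u32.)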
8506
8507         // Send a payment with the right payment hash but the wrong payment secret
8508         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
8509                 RecipientOnionFields::secret_only(random_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
8510         handle_unknown_invalid_payment_data!(our_payment_hash);
8511         expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data);
8512
8513         // Send a payment with a random payment hash, but the right payment secret
8514         nodes[0].node.send_payment_with_route(&route, random_payment_hash,
8515                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(random_payment_hash.0)).unwrap();
8516         handle_unknown_invalid_payment_data!(random_payment_hash);
8517         expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8518
8519         // Send a payment with a random payment hash and random payment secret
8520         nodes[0].node.send_payment_with_route(&route, random_payment_hash,
8521                 RecipientOnionFields::secret_only(random_payment_secret), PaymentId(random_payment_hash.0)).unwrap();
8522         handle_unknown_invalid_payment_data!(random_payment_hash);
8523         expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8524 }
8525
8526 #[test]
8527 fn test_update_err_monitor_lockdown() {
8528         // Our monitor will lock updates of the local commitment transaction once a broadcast
8529         // condition has been fulfilled (either a force-close from the Channel or a block height
8530         // requiring an HTLC-timeout). Trying to update the monitor after lockdown should return
8531         // a ChannelMonitorUpdateStatus error.
8532         //
8533         // This scenario may happen in a watchtower setup, where a watchtower processes a block
8534         // height triggering a timeout while a slow-block-processing ChannelManager receives a
8535         // locally-signed commitment at the same time.
8536
8537         let chanmon_cfgs = create_chanmon_cfgs(2);
8538         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8539         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8540         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8541
8542         // Create an initial channel
8543         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
8544         let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
8545
8546         // Rebalance the network to generate HTLCs in both directions
8547         send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
8548
8549         // Route an HTLC from node 0 to node 1 (but don't settle)
8550         let (preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
8551
8552         // Copy the ChainMonitor to simulate a watchtower and advance node 0's block height until its ChannelMonitor times out the HTLC on-chain
8553         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8554         let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
8555         let persister = test_utils::TestPersister::new();
8556         let watchtower = {
8557                 let new_monitor = {
8558                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8559                         let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
8560                                         &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8561                         assert!(new_monitor == *monitor);
8562                         new_monitor
8563                 };
8564                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8565                 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
8566                 watchtower
8567         };
8568         let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
8569         // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
8570         // transaction lock time requirements here.
8571         chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (block.clone(), 200));
8572         watchtower.chain_monitor.block_connected(&block, 200);
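             // At height 200 the HTLC is past its CLTV expiry, so the watchtower's copy of the
             // monitor broadcasts the holder commitment and locks out any further off-chain
             // updates, which is what the rest of this test exercises.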
8573
8574         // Try to update ChannelMonitor
8575         nodes[1].node.claim_funds(preimage);
8576         check_added_monitors!(nodes[1], 1);
8577         expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
8578
8579         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8580         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
8581         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
8582         {
8583                 let mut node_0_per_peer_lock;
8584                 let mut node_0_peer_state_lock;
8585                 if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
8586                         if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
8587                                 assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);
8588                                 assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8589                         } else { assert!(false); }
8590                 } else {
8591                         assert!(false);
8592                 }
8593         }
8594         // Our local monitor is in sync and hasn't yet processed the timeout
8595         check_added_monitors!(nodes[0], 1);
8596         let events = nodes[0].node.get_and_clear_pending_events();
8597         assert_eq!(events.len(), 1);
8598 }
8599
8600 #[test]
8601 fn test_concurrent_monitor_claim() {
8602         // Watchtower A receives a block and broadcasts state N. The channel then advances to
8603         // new state N+1, which is sent to both watchtowers. Bob accepts N+1, then receives a
8604         // block and broadcasts the latest state N+1. Alice rejects state N+1, but Bob has
8605         // already broadcast it, and state N+1 confirms. Alice claims the output from state N+1.
8606
8607         let chanmon_cfgs = create_chanmon_cfgs(2);
8608         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8609         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8610         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8611
8612         // Create an initial channel
8613         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
8614         let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
8615
8616         // Rebalance the network to generate HTLCs in both directions
8617         send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
8618
8619         // Route an HTLC from node 0 to node 1 (but don't settle)
8620         route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
8621
8622         // Copy the ChainMonitor to simulate watchtower Alice and advance the block height until her ChannelMonitor times out the HTLC on-chain
8623         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8624         let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
8625         let persister = test_utils::TestPersister::new();
8626         let alice_broadcaster = test_utils::TestBroadcaster::with_blocks(
8627                 Arc::new(Mutex::new(nodes[0].blocks.lock().unwrap().clone())),
8628         );
8629         let watchtower_alice = {
8630                 let new_monitor = {
8631                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8632                         let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
8633                                         &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8634                         assert!(new_monitor == *monitor);
8635                         new_monitor
8636                 };
8637                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &alice_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8638                 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
8639                 watchtower
8640         };
8641         let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
8642         // Make Alice aware of enough blocks that it doesn't think we're violating transaction lock time
8643         // requirements here.
8644         const HTLC_TIMEOUT_BROADCAST: u32 = CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS;
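             // i.e. the height at which a monitor goes on-chain for an expired HTLC: the HTLC's
             // CLTV expiry (TEST_FINAL_CLTV blocks after the channel confirmed) plus the
             // LATENCY_GRACE_PERIOD_BLOCKS buffer monitors wait before broadcasting.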
8645         alice_broadcaster.blocks.lock().unwrap().resize((HTLC_TIMEOUT_BROADCAST) as usize, (block.clone(), HTLC_TIMEOUT_BROADCAST));
8646         watchtower_alice.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST);
8647
8648         // Watchtower Alice should have broadcast a commitment/HTLC-timeout
8649         {
8650                 let mut txn = alice_broadcaster.txn_broadcast();
8651                 assert_eq!(txn.len(), 2);
8652                 check_spends!(txn[0], chan_1.3);
8653                 check_spends!(txn[1], txn[0]);
8654         };
8655
8656         // Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first.
8657         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8658         let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
8659         let persister = test_utils::TestPersister::new();
8660         let bob_broadcaster = test_utils::TestBroadcaster::with_blocks(Arc::clone(&alice_broadcaster.blocks));
8661         let watchtower_bob = {
8662                 let new_monitor = {
8663                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8664                         let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
8665                                         &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8666                         assert!(new_monitor == *monitor);
8667                         new_monitor
8668                 };
8669                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &bob_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8670                 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
8671                 watchtower
8672         };
8673         watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST - 1);
8674
8675         // Route another payment to generate another update, with the previous HTLC still pending
8676         let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 3000000);
8677         nodes[1].node.send_payment_with_route(&route, payment_hash,
8678                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
8679         check_added_monitors!(nodes[1], 1);
8680
8681         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8682         assert_eq!(updates.update_add_htlcs.len(), 1);
8683         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]);
8684         {
8685                 let mut node_0_per_peer_lock;
8686                 let mut node_0_peer_state_lock;
8687                 if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
8688                         if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
8689                                 // Watchtower Alice should already have seen the block, and so rejects the update
8690                                 assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);
8691                                 assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8692                                 assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8693                         } else { assert!(false); }
8694                 } else {
8695                         assert!(false);
8696                 }
8697         }
8698         // Our local monitor is in sync and hasn't yet processed the timeout
8699         check_added_monitors!(nodes[0], 1);
8700
8701         // Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout
8702         watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST);
8703
8704         // Watchtower Bob should have broadcast a commitment/HTLC-timeout
8705         let bob_state_y;
8706         {
8707                 let mut txn = bob_broadcaster.txn_broadcast();
8708                 assert_eq!(txn.len(), 2);
8709                 bob_state_y = txn.remove(0);
8710         };
8711
8712         // We confirm Bob's state Y on Alice; she should broadcast an HTLC-timeout
8713         let height = HTLC_TIMEOUT_BROADCAST + 1;
8714         connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
8715         check_closed_broadcast(&nodes[0], 1, true);
8716         check_closed_event!(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false,
8717                 [nodes[1].node.get_our_node_id()], 100000);
8718         watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
8719         check_added_monitors(&nodes[0], 1);
8720         {
8721                 let htlc_txn = alice_broadcaster.txn_broadcast();
8722                 assert_eq!(htlc_txn.len(), 1);
8723                 check_spends!(htlc_txn[0], bob_state_y);
8724         }
8725 }
8726
8727 #[test]
8728 fn test_pre_lockin_no_chan_closed_update() {
8729         // Test that if a peer closes a channel in response to a funding_created message we don't
8730         // generate a channel update (as the channel cannot appear on chain without a funding_signed
8731         // message).
8732         //
8733         // Doing so would imply a channel monitor update before the initial channel monitor
8734         // registration, violating our API guarantees.
8735         //
8736         // Previously, full_stack_target managed to hit this case by opening then closing a channel,
8737         // then opening a second channel with the same funding output as the first (which is not
8738         // rejected because the first channel does not exist in the ChannelManager) and closing it
8739         // before receiving funding_signed.
8740         let chanmon_cfgs = create_chanmon_cfgs(2);
8741         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8742         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8743         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8744
8745         // Create an initial channel
8746         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
8747         let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8748         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
8749         let accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
8750         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
8751
8752         // Move the first channel through the funding flow...
8753         let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
8754
8755         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
8756         check_added_monitors!(nodes[0], 0);
8757
8758         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
8759         let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
8760         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
8761         assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
8762         check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
8763                 [nodes[1].node.get_our_node_id()], 100000);
8764 }
8765
8766 #[test]
8767 fn test_htlc_no_detection() {
8768         // This test is a mutation to underscore the detection logic bug we had
8769         // before #653. The HTLC value routed is above the remaining balance, thus
8770         // inverting the HTLC and `to_remote` outputs. The HTLC comes second and
8771         // wouldn't have been seen by pre-#653 detection, as we were enumerate()'ing
8772         // over a watched outputs vector (Vec<TxOut>), thus implicitly relying on
8773         // output ordering for correct filtering of spending children.
8774
8775         let chanmon_cfgs = create_chanmon_cfgs(2);
8776         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8777         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8778         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8779
8780         // Create some initial channels
8781         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8782
8783         send_payment(&nodes[0], &vec!(&nodes[1])[..], 1_000_000);
8784         let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
8785         let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
8786         assert_eq!(local_txn[0].input.len(), 1);
8787         assert_eq!(local_txn[0].output.len(), 3);
8788         check_spends!(local_txn[0], chan_1.3);
8789
8790         // Time out the HTLC on A's chain so that it can generate an HTLC-Timeout tx
8791         let block = create_dummy_block(nodes[0].best_block_hash(), 42, vec![local_txn[0].clone()]);
8792         connect_block(&nodes[0], &block);
8793         // We deliberately connect the local tx twice, as this would provoke a failure
8794         // if this test were run before the #653 fix.
8795         chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &block, nodes[0].best_block_info().1 + 1);
8796         check_closed_broadcast!(nodes[0], true);
8797         check_added_monitors!(nodes[0], 1);
8798         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
8799         connect_blocks(&nodes[0], TEST_FINAL_CLTV);
8800
8801         let htlc_timeout = {
8802                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8803                 assert_eq!(node_txn.len(), 1);
8804                 assert_eq!(node_txn[0].input.len(), 1);
8805                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
8806                 check_spends!(node_txn[0], local_txn[0]);
8807                 node_txn[0].clone()
8808         };
8809
8810         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![htlc_timeout.clone()]));
8811         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
8812         expect_payment_failed!(nodes[0], our_payment_hash, false);
8813 }
8814
8815 fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain_before_fulfill: bool) {
8816         // If we route an HTLC, then learn the HTLC's preimage after the upstream channel has been
8817         // force-closed, we must claim that HTLC on-chain. (Given an HTLC forwarded from Alice --> Bob -->
8818         // Carol, Alice would be the upstream node, and Carol the downstream.)
8819         //
8820         // Steps of the test:
8821         // 1) Alice sends an HTLC to Carol through Bob.
8822         // 2) Carol doesn't settle the HTLC.
8823         // 3) If broadcast_alice is true, Alice force-closes her channel with Bob. Else Bob force-closes.
8824         // Steps 4 and 5 may be reordered depending on go_onchain_before_fulfill.
8825         // 4) Bob sees Alice's commitment on his chain (or vice versa). An offered output is present
8826         //    but can't yet be claimed, as Bob doesn't know the preimage.
8827         // 5) Carol releases the preimage to Bob off-chain.
8828         // 6) Bob claims the offered output on the broadcasted commitment.
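             //
             // Both orderings of steps 4 and 5 (via `go_onchain_before_fulfill`) and both
             // force-closers (via `broadcast_alice`) are exercised by this function's callers.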
8829         let chanmon_cfgs = create_chanmon_cfgs(3);
8830         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
8831         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
8832         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
8833
8834         // Create some initial channels
8835         let chan_ab = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8836         create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 10001);
8837
8838         // Steps (1) and (2):
8839         // Send an HTLC Alice --> Bob --> Carol, but Carol doesn't settle the HTLC back.
8840         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
8841
8842         // Check that Alice's commitment transaction now contains an output for this HTLC.
8843         let alice_txn = get_local_commitment_txn!(nodes[0], chan_ab.2);
8844         check_spends!(alice_txn[0], chan_ab.3);
8845         assert_eq!(alice_txn[0].output.len(), 2);
8846         check_spends!(alice_txn[1], alice_txn[0]); // 2nd transaction is a non-final HTLC-timeout
8847         assert_eq!(alice_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
8848         assert_eq!(alice_txn.len(), 2);
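             // (The two outputs are Alice's balance and the offered HTLC - Bob's small pushed balance
             // presumably falls below dust. alice_txn[1] is the pre-signed HTLC-Timeout spending the
             // HTLC output.)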
8849
8850         // Steps (3) and (4):
8851         // If `go_onchain_before_fulfill`, broadcast the relevant commitment transaction and check that Bob
8852         // responds by (1) broadcasting a channel update and (2) adding a new ChannelMonitor.
8853         let mut force_closing_node = 0; // Alice force-closes
8854         let mut counterparty_node = 1; // Bob if Alice force-closes
8855
8856         // Bob force-closes
8857         if !broadcast_alice {
8858                 force_closing_node = 1;
8859                 counterparty_node = 0;
8860         }
8861         let error_message = "Channel force-closed";
8862         nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id(), error_message.to_string()).unwrap();
8863         check_closed_broadcast!(nodes[force_closing_node], true);
8864         check_added_monitors!(nodes[force_closing_node], 1);
8865         check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[counterparty_node].node.get_our_node_id()], 100000);
8866         if go_onchain_before_fulfill {
8867                 let txn_to_broadcast = match broadcast_alice {
8868                         true => alice_txn.clone(),
8869                         false => get_local_commitment_txn!(nodes[1], chan_ab.2)
8870                 };
8871                 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
8872                 if broadcast_alice {
8873                         check_closed_broadcast!(nodes[1], true);
8874                         check_added_monitors!(nodes[1], 1);
8875                         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
8876                 }
8877         }
8878
8879         // Step (5):
8880         // Carol then claims the funds and sends an update_fulfill message to Bob, and they go through the
8881         // process of removing the HTLC from their commitment transactions.
8882         nodes[2].node.claim_funds(payment_preimage);
8883         check_added_monitors!(nodes[2], 1);
8884         expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
8885
8886         let carol_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
8887         assert!(carol_updates.update_add_htlcs.is_empty());
8888         assert!(carol_updates.update_fail_htlcs.is_empty());
8889         assert!(carol_updates.update_fail_malformed_htlcs.is_empty());
8890         assert!(carol_updates.update_fee.is_none());
8891         assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
8892
8893         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
8894         let went_onchain = go_onchain_before_fulfill || force_closing_node == 1;
8895         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if went_onchain { None } else { Some(1000) }, went_onchain, false);
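             // Bob's forwarding fee (1000 msat) is only reported when the claim happened entirely
             // off-chain; once the channel went on-chain, the earned fee can't be determined, so
             // expect_payment_forwarded is given None.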
8896         // If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
8897         if !go_onchain_before_fulfill && broadcast_alice {
8898                 let events = nodes[1].node.get_and_clear_pending_msg_events();
8899                 assert_eq!(events.len(), 1);
8900                 match events[0] {
8901                         MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
8902                                 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8903                         },
8904                         _ => panic!("Unexpected event"),
8905                 };
8906         }
8907         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &carol_updates.commitment_signed);
8908         // One monitor update for the preimage to update the Bob<->Alice channel, and one monitor
8909         // update for Carol<->Bob's updated commitment transaction info.
8910         check_added_monitors!(nodes[1], 2);
8911
8912         let events = nodes[1].node.get_and_clear_pending_msg_events();
8913         assert_eq!(events.len(), 2);
8914         let bob_revocation = match events[0] {
8915                 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
8916                         assert_eq!(*node_id, nodes[2].node.get_our_node_id());
8917                         (*msg).clone()
8918                 },
8919                 _ => panic!("Unexpected event"),
8920         };
8921         let bob_updates = match events[1] {
8922                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
8923                         assert_eq!(*node_id, nodes[2].node.get_our_node_id());
8924                         (*updates).clone()
8925                 },
8926                 _ => panic!("Unexpected event"),
8927         };
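             // Complete the Bob <-> Carol commitment dance: Bob sends his revoke_and_ack and
             // commitment_signed, and Carol responds with her final revoke_and_ack.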
8928
8929         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revocation);
8930         check_added_monitors!(nodes[2], 1);
8931         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_updates.commitment_signed);
8932         check_added_monitors!(nodes[2], 1);
8933
8934         let events = nodes[2].node.get_and_clear_pending_msg_events();
8935         assert_eq!(events.len(), 1);
8936         let carol_revocation = match events[0] {
8937                 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
8938                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
8939                         (*msg).clone()
8940                 },
8941                 _ => panic!("Unexpected event"),
8942         };
8943         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &carol_revocation);
8944         check_added_monitors!(nodes[1], 1);
8945
8946         // If this test requires the force-closed channel to not be on-chain until after the fulfill,
8947         // here's where we put said channel's commitment tx on-chain.
8948         let mut txn_to_broadcast = alice_txn.clone();
8949         if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); }
8950         if !go_onchain_before_fulfill {
8951                 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
8952                 // If Bob was the one to force-close, he will have already passed these checks earlier.
8953                 if broadcast_alice {
8954                         check_closed_broadcast!(nodes[1], true);
8955                         check_added_monitors!(nodes[1], 1);
8956                         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
8957                 }
8958                 let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
8959                 if broadcast_alice {
8960                         assert_eq!(bob_txn.len(), 1);
8961                         check_spends!(bob_txn[0], txn_to_broadcast[0]);
8962                 } else {
8963                         if nodes[1].connect_style.borrow().updates_best_block_first() {
8964                                 assert_eq!(bob_txn.len(), 3);
8965                                 assert_eq!(bob_txn[0].txid(), bob_txn[1].txid());
8966                         } else {
8967                                 assert_eq!(bob_txn.len(), 2);
8968                         }
8969                         check_spends!(bob_txn[0], chan_ab.3);
8970                 }
8971         }
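             // Note that when Bob himself force-closed, his broadcaster also holds his own commitment
             // transaction (and, on connect styles which update the best block first, a duplicate
             // re-broadcast of it), which is why the transaction counts above differ by connect style.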
8972
8973         // Step (6):
8974         // Finally, check that Bob broadcasted a preimage-claiming transaction for the HTLC output on the
8975         // broadcasted commitment transaction.
8976         {
8977                 let script_weight = match broadcast_alice {
8978                         true => OFFERED_HTLC_SCRIPT_WEIGHT,
8979                         false => ACCEPTED_HTLC_SCRIPT_WEIGHT
8980                 };
8981                 // If Alice force-closed, Bob only broadcasts a HTLC-output-claiming transaction. Otherwise,
8982                 // Bob force-closed and broadcasts the commitment transaction along with a
8983                 // HTLC-output-claiming transaction.
8984                 let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
8985                 if broadcast_alice {
8986                         assert_eq!(bob_txn.len(), 1);
8987                         check_spends!(bob_txn[0], txn_to_broadcast[0]);
8988                         assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
8989                 } else {
8990                         assert_eq!(bob_txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 3 } else { 2 });
8991                         let htlc_tx = bob_txn.pop().unwrap();
8992                         check_spends!(htlc_tx, txn_to_broadcast[0]);
8993                         assert_eq!(htlc_tx.input[0].witness.last().unwrap().len(), script_weight);
8994                 }
8995         }
8996 }
8997
8998 #[test]
8999 fn test_onchain_htlc_settlement_after_close() {
9000         do_test_onchain_htlc_settlement_after_close(true, true);
9001         do_test_onchain_htlc_settlement_after_close(false, true); // Technically redundant, but may as well
9002         do_test_onchain_htlc_settlement_after_close(true, false);
9003         do_test_onchain_htlc_settlement_after_close(false, false);
9004 }
9005
9006 #[test]
9007 fn test_duplicate_temporary_channel_id_from_different_peers() {
9008         // Tests that we can accept two different `OpenChannel` requests with the same
9009         // `temporary_channel_id`, as long as they are from different peers.
9010         let chanmon_cfgs = create_chanmon_cfgs(3);
9011         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9012         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9013         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9014
9015         // Create the first channel
9016         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
9017         let mut open_chan_msg_chan_1_0 = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
9018
9019         // Create the second channel
9020         nodes[2].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap();
9021         let mut open_chan_msg_chan_2_0 = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
9022
9023         // Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same
9024         // `temporary_channel_id` as the `OpenChannel` from nodes[1] to nodes[0].
9025         open_chan_msg_chan_2_0.common_fields.temporary_channel_id = open_chan_msg_chan_1_0.common_fields.temporary_channel_id;
9026
9027         // Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same
9028         // `temporary_channel_id` as they are from different peers.
9029         nodes[0].node.handle_open_channel(&nodes[1].node.get_our_node_id(), &open_chan_msg_chan_1_0);
9030         {
9031                 let events = nodes[0].node.get_and_clear_pending_msg_events();
9032                 assert_eq!(events.len(), 1);
9033                 match &events[0] {
9034                         MessageSendEvent::SendAcceptChannel { node_id, msg } => {
9035                                 assert_eq!(node_id, &nodes[1].node.get_our_node_id());
9036                                 assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
9037                         },
9038                         _ => panic!("Unexpected event"),
9039                 }
9040         }
9041
9042         nodes[0].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg_chan_2_0);
9043         {
9044                 let events = nodes[0].node.get_and_clear_pending_msg_events();
9045                 assert_eq!(events.len(), 1);
9046                 match &events[0] {
9047                         MessageSendEvent::SendAcceptChannel { node_id, msg } => {
9048                                 assert_eq!(node_id, &nodes[2].node.get_our_node_id());
9049                                 assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
9050                         },
9051                         _ => panic!("Unexpected event"),
9052                 }
9053         }
9054 }
9055
9056 #[test]
9057 fn test_peer_funding_sidechannel() {
9058         // Test that if a peer somehow learns which txid we'll use for our channel funding before we
9059         // receive `funding_transaction_generated` the peer cannot cause us to crash. We'd previously
9060         // assumed that LDK would receive `funding_transaction_generated` prior to our peer learning
9061         // the txid and panicked if the peer tried to open a redundant channel to us with the same
9062         // funding outpoint.
9063         //
9064         // While this assumption is generally safe, some users may have out-of-band protocols where
9065         // they notify their LSP about a funding outpoint first, or this may be violated in the future
9066         // with collaborative transaction construction protocols, e.g. dual-funding.
9067         let chanmon_cfgs = create_chanmon_cfgs(3);
9068         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9069         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9070         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9071
9072         let temp_chan_id_ab = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
9073         let temp_chan_id_ca = exchange_open_accept_chan(&nodes[2], &nodes[0], 1_000_000, 0);
9074
9075         let (_, tx, funding_output) =
9076                 create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
9077
9078         let cs_funding_events = nodes[2].node.get_and_clear_pending_events();
9079         assert_eq!(cs_funding_events.len(), 1);
9080         match cs_funding_events[0] {
9081                 Event::FundingGenerationReady { .. } => {}
9082                 _ => panic!("Unexpected event {:?}", cs_funding_events),
9083         }
9084
9085         nodes[2].node.funding_transaction_generated_unchecked(&temp_chan_id_ca, &nodes[0].node.get_our_node_id(), tx.clone(), funding_output.index).unwrap();
9086         let funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[0].node.get_our_node_id());
9087         nodes[0].node.handle_funding_created(&nodes[2].node.get_our_node_id(), &funding_created_msg);
9088         get_event_msg!(nodes[0], MessageSendEvent::SendFundingSigned, nodes[2].node.get_our_node_id());
9089         expect_channel_pending_event(&nodes[0], &nodes[2].node.get_our_node_id());
9090         check_added_monitors!(nodes[0], 1);
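             // nodes[0] now has a pending channel with nodes[2] which is funded by this same outpoint.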
9091
9092         let res = nodes[0].node.funding_transaction_generated(&temp_chan_id_ab, &nodes[1].node.get_our_node_id(), tx.clone());
9093         let err_msg = format!("{:?}", res.unwrap_err());
9094         assert!(err_msg.contains("An existing channel using outpoint "));
9095         assert!(err_msg.contains(" is open with peer"));
9096         // Even though the last funding_transaction_generated errored, it still generated a
9097         // SendFundingCreated. However, when the peer responds with a funding_signed, we will reply
9098         // with the appropriate error message.
9099         let as_funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9100         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &as_funding_created);
9101         check_added_monitors!(nodes[1], 1);
9102         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9103         let reason = ClosureReason::ProcessingError { err: format!("An existing channel using outpoint {} is open with peer {}", funding_output, nodes[2].node.get_our_node_id()), };
9104         check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(ChannelId::v1_from_funding_outpoint(funding_output), true, reason)]);
9105
9106         let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9107         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
9108         get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
9109 }
9110
9111 #[test]
9112 fn test_duplicate_conflicting_funding_from_second_peer() {
9113         // Test that if a user tries to fund a channel with a funding outpoint they'd previously used,
9114         // we don't try to remove the previous ChannelMonitor. This is largely a test to ensure we
9115         // don't regress in the fuzzer, as such funding getting passed our outpoint-matches checks
9116         // implies the user (and our counterparty) has reused cryptographic keys across channels, which
9117         // we require the user not to do.
9118         let chanmon_cfgs = create_chanmon_cfgs(4);
9119         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9120         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9121         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9122
9123         let temp_chan_id = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
9124
9125         let (_, tx, funding_output) =
9126                 create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
9127
9128         // Now that we have a funding outpoint, create a dummy `ChannelMonitor` and insert it into
9129         // nodes[0]'s ChainMonitor so that the initial `ChannelMonitor` write fails.
9130         let dummy_chan_id = create_chan_between_nodes(&nodes[2], &nodes[3]).3;
9131         let dummy_monitor = get_monitor!(nodes[2], dummy_chan_id).clone();
9132         nodes[0].chain_monitor.chain_monitor.watch_channel(funding_output, dummy_monitor).unwrap();
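             // nodes[0]'s ChainMonitor is now already tracking this funding outpoint, so its own
             // watch_channel call while handling funding_signed below will fail.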
9133
9134         nodes[0].node.funding_transaction_generated(&temp_chan_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9135
9136         let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9137         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
9138         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9139         check_added_monitors!(nodes[1], 1);
9140         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9141
9142         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
9143         // At this point, the channel should be closed, after having generated one monitor write (the
9144         // watch_channel call which failed), but zero monitor updates.
9145         check_added_monitors!(nodes[0], 1);
9146         get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
9147         let err_reason = ClosureReason::ProcessingError { err: "Channel funding outpoint was a duplicate".to_owned() };
9148         check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(funding_signed_msg.channel_id, true, err_reason)]);
9149 }
9150
9151 #[test]
9152 fn test_duplicate_funding_err_in_funding() {
9153         // Test that if we have a live channel with one peer, then another peer comes along and tries
9154         // to create a second channel with the same txid we'll fail and not overwrite the
9155         // outpoint_to_peer map in `ChannelManager`.
9156         //
9157         // This was previously broken.
9158         let chanmon_cfgs = create_chanmon_cfgs(3);
9159         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9160         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9161         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9162
9163         let (_, _, _, real_channel_id, funding_tx) = create_chan_between_nodes(&nodes[0], &nodes[1]);
9164         let real_chan_funding_txo = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 };
9165         assert_eq!(ChannelId::v1_from_funding_outpoint(real_chan_funding_txo), real_channel_id);
9166
9167         nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
9168         let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9169         let node_c_temp_chan_id = open_chan_msg.common_fields.temporary_channel_id;
9170         open_chan_msg.common_fields.temporary_channel_id = real_channel_id;
9171         nodes[1].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg);
9172         let mut accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[2].node.get_our_node_id());
9173         accept_chan_msg.common_fields.temporary_channel_id = node_c_temp_chan_id;
9174         nodes[2].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
9175
9176         // Now that we have a second channel with the same funding txo, send a bogus funding message
9177         // and let nodes[1] remove the inbound channel.
9178         let (_, funding_tx, _) = create_funding_transaction(&nodes[2], &nodes[1].node.get_our_node_id(), 100_000, 42);
9179
9180         nodes[2].node.funding_transaction_generated(&node_c_temp_chan_id, &nodes[1].node.get_our_node_id(), funding_tx).unwrap();
9181
9182         let mut funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9183         funding_created_msg.temporary_channel_id = real_channel_id;
9184         // Make the signature invalid by changing the funding output
9185         funding_created_msg.funding_output_index += 10;
9186         nodes[1].node.handle_funding_created(&nodes[2].node.get_our_node_id(), &funding_created_msg);
9187         get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
9188         let err = "Invalid funding_created signature from peer".to_owned();
9189         let reason = ClosureReason::ProcessingError { err };
9190         let expected_closing = ExpectedCloseEvent::from_id_reason(real_channel_id, false, reason);
9191         check_closed_events(&nodes[1], &[expected_closing]);
9192
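             // The outpoint_to_peer entry must still point at the original peer (nodes[0]); the failed
             // duplicate above must not have overwritten it.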
9193         assert_eq!(
9194                 *nodes[1].node.outpoint_to_peer.lock().unwrap().get(&real_chan_funding_txo).unwrap(),
9195                 nodes[0].node.get_our_node_id()
9196         );
9197 }
9198
9199 #[test]
9200 fn test_duplicate_chan_id() {
9201         // Test that if a given peer tries to open a channel with the same channel_id as one that is
9202         // already open we reject it and keep the old channel.
9203         //
9204         // Previously, full_stack_target managed to figure out that if you tried to open two channels
9205         // with the same funding output (ie post-funding channel_id), we'd create a monitor update for
9206         // the existing channel when we detect the duplicate new channel, screwing up our monitor
9207         // updating logic for the existing channel.
9208         let chanmon_cfgs = create_chanmon_cfgs(2);
9209         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9210         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9211         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9212
9213         // Create an initial channel
9214         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
9215         let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9216         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
9217         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9218
9219         // Try to create a second channel with the same temporary_channel_id as the first and check
9220         // that it is rejected.
9221         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
9222         {
9223                 let events = nodes[1].node.get_and_clear_pending_msg_events();
9224                 assert_eq!(events.len(), 1);
9225                 match events[0] {
9226                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
9227                                 // Technically, at this point, nodes[1] would be justified in thinking both the
9228                                 // first (valid) and second (invalid) channels are closed, given they both have
9229                                 // the same non-temporary channel_id. However, currently we do not, so we just
9230                                 // move forward with it.
9231                                 assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
9232                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
9233                         },
9234                         _ => panic!("Unexpected event"),
9235                 }
9236         }
9237
9238         // Move the first channel through the funding flow...
9239         let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
9240
9241         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9242         check_added_monitors!(nodes[0], 0);
9243
9244         let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9245         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
9246         {
9247                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
9248                 assert_eq!(added_monitors.len(), 1);
9249                 assert_eq!(added_monitors[0].0, funding_output);
9250                 added_monitors.clear();
9251         }
9252         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9253
9254         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9255
9256         let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
9257         let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
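             // (v1 channel IDs are derived from the funding txid and output index, so this is the
             // channel_id the now-funded channel will use.)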
9258
9259         // Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a
9260         // temporary one).
9261
9262         // First try to open a second channel with a temporary channel id equal to the txid-based one.
9263         // Technically this is allowed by the spec, but we don't support it and there's little reason
9264         // to. Still, it shouldn't cause any other issues.
9265         open_chan_msg.common_fields.temporary_channel_id = channel_id;
9266         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
9267         {
9268                 let events = nodes[1].node.get_and_clear_pending_msg_events();
9269                 assert_eq!(events.len(), 1);
9270                 match events[0] {
9271                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
9272                                 // Technically, at this point, nodes[1] would be justified in thinking both
9273                                 // channels are closed, but currently we do not, so we just move forward with it.
9274                                 assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
9275                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
9276                         },
9277                         _ => panic!("Unexpected event"),
9278                 }
9279         }
9280
9281         // Now try to create a second channel which has a duplicate funding output.
9282         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
9283         let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9284         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_2_msg);
9285         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9286         create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event
9287
9288         let funding_created = {
9289                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
9290                 let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
9291                 // Once we call `get_funding_created` the channel has the same channel_id as
9292                 // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
9293                 // try to create another channel. Instead, we drop the channel entirely here, leaving the
9294                 // ChannelManager in a possibly nonsense state.
9295                 match a_peer_state.channel_by_id.remove(&open_chan_2_msg.common_fields.temporary_channel_id).unwrap() {
9296                         ChannelPhase::UnfundedOutboundV1(mut chan) => {
9297                                 let logger = test_utils::TestLogger::new();
9298                                 chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap()
9299                         },
9300                         _ => panic!("Unexpected ChannelPhase variant"),
9301                 }.unwrap()
9302         };
9303         check_added_monitors!(nodes[0], 0);
9304         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
9305         // At this point we'll look up if the channel_id is present and immediately fail the channel
9306         // without trying to persist the `ChannelMonitor`.
9307         check_added_monitors!(nodes[1], 0);
9308
9309         check_closed_events(&nodes[1], &[
9310                 ExpectedCloseEvent::from_id_reason(funding_created.temporary_channel_id, false, ClosureReason::ProcessingError {
9311                         err: "Already had channel with the new channel_id".to_owned()
9312                 })
9313         ]);
9314
9315         // ...still, nodes[1] will reject the duplicate channel.
9316         {
9317                 let events = nodes[1].node.get_and_clear_pending_msg_events();
9318                 assert_eq!(events.len(), 1);
9319                 match events[0] {
9320                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
9321                                 // Technically, at this point, nodes[1] would be justified in thinking both
9322                                 // channels are closed, but currently we do not, so we just move forward with it.
9323                                 assert_eq!(msg.channel_id, channel_id);
9324                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
9325                         },
9326                         _ => panic!("Unexpected event"),
9327                 }
9328         }
9329
9330         // Finally, finish creating the original channel and send a payment over it to make sure
9331         // everything is functional.
9332         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
9333         {
9334                 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
9335                 assert_eq!(added_monitors.len(), 1);
9336                 assert_eq!(added_monitors[0].0, funding_output);
9337                 added_monitors.clear();
9338         }
9339         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9340
9341         let events_4 = nodes[0].node.get_and_clear_pending_events();
9342         assert_eq!(events_4.len(), 0);
9343         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
9344         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
9345
9346         let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
9347         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
9348         update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
9349
9350         send_payment(&nodes[0], &[&nodes[1]], 8000000);
9351 }
9352
9353 #[test]
9354 fn test_error_chans_closed() {
9355         // Test that we properly handle error messages, closing appropriate channels.
9356         //
9357         // Prior to #787 we'd allow a peer to make us force-close a channel we had with a different
9358         // peer. The "real" fix for that is to index channels by peer_ids. However, in the meantime
9359         // we can test various edge cases around it to ensure we don't regress.
9360         let chanmon_cfgs = create_chanmon_cfgs(3);
9361         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9362         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9363         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9364
9365         // Create some initial channels
9366         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9367         let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9368         let chan_3 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001);
9369
9370         assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
9371         assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
9372         assert_eq!(nodes[2].node.list_usable_channels().len(), 1);
9373
9374         // Closing a channel from a different peer has no effect
9375         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() });
9376         assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
9377
9378         // Closing one channel doesn't impact others
9379         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
9380         check_added_monitors!(nodes[0], 1);
9381         check_closed_broadcast!(nodes[0], false);
9382         check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
9383                 [nodes[1].node.get_our_node_id()], 100000);
9384         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
9385         assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
9386         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
9387         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2);
9388
9389         // A null channel ID should close all channels with that peer
9390         let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9391         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: ChannelId::new_zero(), data: "ERR".to_owned() });
9392         check_added_monitors!(nodes[0], 2);
9393         check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
9394                 [nodes[1].node.get_our_node_id(); 2], 100000);
9395         let events = nodes[0].node.get_and_clear_pending_msg_events();
9396         assert_eq!(events.len(), 2);
9397         match events[0] {
9398                 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
9399                         assert_eq!(msg.contents.flags & 2, 2);
9400                 },
9401                 _ => panic!("Unexpected event"),
9402         }
9403         match events[1] {
9404                 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
9405                         assert_eq!(msg.contents.flags & 2, 2);
9406                 },
9407                 _ => panic!("Unexpected event"),
9408         }
9409         // Note that at this point users of a standard PeerHandler will end up calling
9410         // peer_disconnected.
9411         assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
9412         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
9413
9414         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
9415         assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
9416         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
9417 }
9418
9419 #[test]
9420 fn test_invalid_funding_tx() {
9421         // Test that we properly handle invalid funding transactions sent to us from a peer.
9422         //
9423         // Previously, all other major lightning implementations had failed to properly sanitize
9424         // funding transactions from their counterparties, leading to a multi-implementation critical
9425         // security vulnerability (though we always sanitized properly, we've previously had
9426         // un-released crashes in the sanitization process).
9427         //
9428         // Further, if the funding transaction is consensus-valid, confirms, and is later spent, we'd
9429         // previously have crashed in `ChannelMonitor` even though we closed the channel as bogus and
9430         // gave up on it. We test this here by generating such a transaction.
9431         let chanmon_cfgs = create_chanmon_cfgs(2);
9432         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9433         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9434         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9435
9436         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 10_000, 42, None, None).unwrap();
9437         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
9438         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9439
9440         let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
9441
9442         // Create a witness program which can be spent by a 4-empty-stack-elements witness and which is
9443         // 136 bytes long. This matches our "accepted HTLC preimage spend" pattern, previously causing
9444         // a panic as we'd try to extract a 32 byte preimage from a witness element without checking
9445         // its length.
9446         let wit_program: Vec<u8> = channelmonitor::deliberately_bogus_accepted_htlc_witness_program();
9447         let wit_program_script: ScriptBuf = wit_program.into();
9448         for output in tx.output.iter_mut() {
9449                 // Make the confirmed funding transaction have a bogus script_pubkey
9450                 output.script_pubkey = ScriptBuf::new_p2wsh(&wit_program_script.wscript_hash());
9451         }
9452
9453         nodes[0].node.funding_transaction_generated_unchecked(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone(), 0).unwrap();
9454         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
9455         check_added_monitors!(nodes[1], 1);
9456         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9457
9458         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
9459         check_added_monitors!(nodes[0], 1);
9460         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9461
9462         let events_1 = nodes[0].node.get_and_clear_pending_events();
9463         assert_eq!(events_1.len(), 0);
9464
9465         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
9466         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
9467         nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
9468
9469         let expected_err = "funding tx had wrong script/value or output index";
9470         confirm_transaction_at(&nodes[1], &tx, 1);
9471         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() },
9472                 [nodes[0].node.get_our_node_id()], 100000);
9473         check_added_monitors!(nodes[1], 1);
9474         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
9475         assert_eq!(events_2.len(), 1);
9476         if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] {
9477                 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
9478                 if let msgs::ErrorAction::DisconnectPeer { msg } = action {
9479                         assert_eq!(msg.as_ref().unwrap().data, "Channel closed because of an exception: ".to_owned() + expected_err);
9480                 } else { panic!(); }
9481         } else { panic!(); }
9482         assert_eq!(nodes[1].node.list_channels().len(), 0);
9483
9484         // Now confirm a spend of the (bogus) funding transaction. As long as the witness is 5 elements
9485         // long, the ChannelMonitor would previously try to read 32 bytes from the second-to-last
9486         // element, panicking as it's not 32 bytes long.
9487         let mut spend_tx = Transaction {
9488                 version: Version::TWO, lock_time: LockTime::ZERO,
9489                 input: tx.output.iter().enumerate().map(|(idx, _)| TxIn {
9490                         previous_output: BitcoinOutPoint {
9491                                 txid: tx.txid(),
9492                                 vout: idx as u32,
9493                         },
9494                         script_sig: ScriptBuf::new(),
9495                         sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
9496                         witness: Witness::from_slice(&channelmonitor::deliberately_bogus_accepted_htlc_witness())
9497                 }).collect(),
9498                 output: vec![TxOut {
9499                         value: Amount::from_sat(1000),
9500                         script_pubkey: ScriptBuf::new(),
9501                 }]
9502         };
9503         check_spends!(spend_tx, tx);
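             // Mining this spend must not panic the ChannelMonitor despite the bogus witness elements.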
9504         mine_transaction(&nodes[1], &spend_tx);
9505 }
9506
9507 #[test]
9508 fn test_coinbase_funding_tx() {
9509         // Miners are able to fund channels directly from coinbase transactions, however
9510         // by consensus rules, outputs of a coinbase transaction are encumbered by a 100
9511         // block maturity timelock. To ensure that a (non-0conf) channel like this is enforceable
9512         // on-chain, the minimum depth is updated to 100 blocks for coinbase funding transactions.
9513         //
9514         // Note that 0conf channels with coinbase funding transactions are unaffected and are
9515         // immediately operational after opening.
9516         let chanmon_cfgs = create_chanmon_cfgs(2);
9517         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9518         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9519         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9520
9521         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
9522         let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9523
9524         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
9525         let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9526
9527         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
9528
9529         // Create the coinbase funding transaction.
9530         let (temporary_channel_id, tx, _) = create_coinbase_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
9531
9532         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9533         check_added_monitors!(nodes[0], 0);
9534         let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9535
9536         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
9537         check_added_monitors!(nodes[1], 1);
9538         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9539
9540         let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9541
9542         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
9543         check_added_monitors!(nodes[0], 1);
9544
9545         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9546         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
9547
9548         // Starting at height 0, we "confirm" the coinbase at height 1.
9549         confirm_transaction_at(&nodes[0], &tx, 1);
9550         // We connect 98 more blocks to have 99 confirmations for the coinbase transaction.
9551         connect_blocks(&nodes[0], COINBASE_MATURITY - 2);
9552         // Check that we have no pending message events (we have not queued a `channel_ready` yet).
9553         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
9554         // Now connect one more block which results in 100 confirmations of the coinbase transaction.
9555         connect_blocks(&nodes[0], 1);
9556         // There should now be a `channel_ready` which can be handled.
9557         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(&nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
9558
9559         confirm_transaction_at(&nodes[1], &tx, 1);
9560         connect_blocks(&nodes[1], COINBASE_MATURITY - 2);
9561         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
9562         connect_blocks(&nodes[1], 1);
9563         expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
9564         create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
9565 }
9566
9567 fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock: bool) {
9568         // In the first version of the chain::Confirm interface, after a refactor was made to not
9569         // broadcast CSV-locked transactions until their CSV lock is up, we wouldn't reliably broadcast
9570         // transactions after a `transactions_confirmed` call. Specifically, if the chain, provided via
9571         // `best_block_updated` is at height N, and a transaction output which we wish to spend at
9572         // height N-1 (due to a CSV to height N-1) is provided at height N, we will not broadcast the
9573         // spending transaction until height N+1 (or greater). This was due to the way
9574         // `ChannelMonitor::transactions_confirmed` worked, only checking if we should broadcast a
9575         // spending transaction at the height the input transaction was confirmed at, not whether we
9576         // should broadcast a spending transaction at the current height.
9577         // A second, similar, issue involved failing HTLCs backwards - because we only provided the
9578         // height at which transactions were confirmed to `OnchainTx::update_claims_view`, it wasn't
9579         // aware that the anti-reorg-delay had, in fact, already expired, and so it waited to
9580         // fail-backwards until we learned about an additional block.
9581         //
9582         // As an additional check, if `test_height_before_timelock` is set, we instead test that we
9583         // aren't broadcasting transactions too early (ie not broadcasting them at all).
9584         let chanmon_cfgs = create_chanmon_cfgs(3);
9585         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9586         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9587         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9588         *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
9589
9590         create_announced_chan_between_nodes(&nodes, 0, 1);
9591         let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
9592         let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
9593         nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
9594         nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
9595         let error_message = "Channel force-closed";
9596         nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id(), error_message.to_string()).unwrap();
9597         check_closed_broadcast!(nodes[1], true);
9598         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[2].node.get_our_node_id()], 100000);
9599         check_added_monitors!(nodes[1], 1);
9600         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
9601         assert_eq!(node_txn.len(), 1);
9602
9603         let conf_height = nodes[1].best_block_info().1;
9604         if !test_height_before_timelock {
9605                 connect_blocks(&nodes[1], 24 * 6);
9606         }
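             // Confirm the commitment transaction at its original height even though our best block is
             // now well past it - the monitor must evaluate timelocks against the current height, not
             // the confirmation height.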
9607         nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
9608                 &nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height);
9609         if test_height_before_timelock {
9610                 // If we confirmed the close transaction, but timelocks have not yet expired, we should not
9611                 // generate any events or broadcast any transactions
9612                 assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
9613                 assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
9614         } else {
9615                 // We should broadcast an HTLC transaction spending our funding transaction first
9616                 let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
9617                 assert_eq!(spending_txn.len(), 2);
9618                 let htlc_tx = if spending_txn[0].txid() == node_txn[0].txid() {
9619                         &spending_txn[1]
9620                 } else {
9621                         &spending_txn[0]
9622                 };
9623                 check_spends!(htlc_tx, node_txn[0]);
9624                 // We should also generate a SpendableOutputs event with the to_self output (as its
9625                 // timelock is up).
9626                 let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
9627                 assert_eq!(descriptor_spend_txn.len(), 1);
9628
9629                 // If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we
9630                 // should immediately fail-backwards the HTLC to the previous hop, without waiting for an
9631                 // additional block built on top of the current chain.
9632                 nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
9633                         &nodes[1].get_block_header(conf_height + 1), &[(0, htlc_tx)], conf_height + 1);
9634                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
9635                 check_added_monitors!(nodes[1], 1);
9636
9637                 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9638                 assert!(updates.update_add_htlcs.is_empty());
9639                 assert!(updates.update_fulfill_htlcs.is_empty());
9640                 assert_eq!(updates.update_fail_htlcs.len(), 1);
9641                 assert!(updates.update_fail_malformed_htlcs.is_empty());
9642                 assert!(updates.update_fee.is_none());
9643                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
9644                 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
9645                 expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_announce.contents.short_channel_id, true);
9646         }
9647 }
9648
9649 #[test]
9650 fn test_tx_confirmed_skipping_blocks_immediate_broadcast() {
9651         do_test_tx_confirmed_skipping_blocks_immediate_broadcast(false);
9652         do_test_tx_confirmed_skipping_blocks_immediate_broadcast(true);
9653 }
9654
9655 fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
9656         let chanmon_cfgs = create_chanmon_cfgs(2);
9657         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9658         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9659         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9660
9661         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9662
9663         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
9664                 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
9665         let route = get_route!(nodes[0], payment_params, 10_000).unwrap();
9666
9667         let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]);
9668
9669         {
9670                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
9671                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
9672                 check_added_monitors!(nodes[0], 1);
9673                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9674                 assert_eq!(events.len(), 1);
9675                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
9676                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9677                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
9678         }
9679         expect_pending_htlcs_forwardable!(nodes[1]);
9680         expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 10_000);
9681
9682         {
9683                 // Note that we use a different PaymentId here to allow us to make a duplicate payment
9684                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
9685                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_secret.0)).unwrap();
9686                 check_added_monitors!(nodes[0], 1);
9687                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9688                 assert_eq!(events.len(), 1);
9689                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
9690                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9691                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
9692                 // At this point, nodes[1] would notice it has too much value for the payment. It will
9693                 // assume the second is a privacy attack (no longer particularly relevant
9694                 // post-payment_secrets) and fail back the new HTLC. Previously, it'd also have failed back
9695                 // the first HTLC delivered above.
9696         }
9697
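        // Process the duplicate HTLC: nodes[1] will queue a failure for it while leaving the
        // original HTLC claimable.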
9698         expect_pending_htlcs_forwardable_ignore!(nodes[1]);
9699         nodes[1].node.process_pending_htlc_forwards();
9700
9701         if test_for_second_fail_panic {
9702                 // Now we go fail back the first HTLC from the user end.
9703                 nodes[1].node.fail_htlc_backwards(&our_payment_hash);
9704
9705                 let expected_destinations = vec![
9706                         HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
9707                         HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
9708                 ];
9709                 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations);
9710                 nodes[1].node.process_pending_htlc_forwards();
9711
9712                 check_added_monitors!(nodes[1], 1);
9713                 let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9714                 assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2);
9715
9716                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9717                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]);
9718                 commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
9719
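                // Each send above used its own PaymentId, so both failures surface independently:
                // a PaymentPathFailed followed by a terminal PaymentFailed, four events in total.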
9720                 let failure_events = nodes[0].node.get_and_clear_pending_events();
9721                 assert_eq!(failure_events.len(), 4);
9722                 if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); }
9723                 if let Event::PaymentFailed { .. } = failure_events[1] {} else { panic!(); }
9724                 if let Event::PaymentPathFailed { .. } = failure_events[2] {} else { panic!(); }
9725                 if let Event::PaymentFailed { .. } = failure_events[3] {} else { panic!(); }
9726         } else {
9727                 // Let the second HTLC fail and claim the first
9728                 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
9729                 nodes[1].node.process_pending_htlc_forwards();
9730
9731                 check_added_monitors!(nodes[1], 1);
9732                 let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9733                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9734                 commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
9735
9736                 expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new());
9737
9738                 claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
9739         }
9740 }
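
// A minimal sketch (not part of the original suite) of why the duplicate send above needed a
// fresh PaymentId: resubmitting with the PaymentId of an in-flight payment is rejected instead
// of producing a second HTLC. Parameters are assumed to describe that in-flight payment.
#[allow(dead_code)]
fn example_duplicate_payment_id_is_rejected(node: &Node<'_, '_, '_>, route: &Route,
        payment_hash: PaymentHash, payment_secret: PaymentSecret, payment_id: PaymentId,
) {
        let onion = RecipientOnionFields::secret_only(payment_secret);
        match node.node.send_payment_with_route(route, payment_hash, onion, payment_id) {
                Err(PaymentSendFailure::DuplicatePayment) => {},
                _ => panic!("reusing an in-flight PaymentId should be rejected"),
        }
}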
9741
9742 #[test]
9743 fn test_dup_htlc_second_fail_panic() {
9744         // Previously, if we received two HTLCs back-to-back, where the second overran the expected
9745         // value for the payment, we'd fail back both HTLCs after generating a `PaymentClaimable` event.
9746         // Then, if the user failed the second payment, they'd hit a "tried to fail an already failed
9747         // HTLC" debug panic. This tests for that behavior, checking that only one HTLC is auto-failed.
9748         do_test_dup_htlc_second_rejected(true);
9749 }
9750
9751 #[test]
9752 fn test_dup_htlc_second_rejected() {
9753         // Test that if we receive a second HTLC for an MPP payment that overruns the payment amount we
9754         // simply reject the second HTLC but are still able to claim the first HTLC.
9755         do_test_dup_htlc_second_rejected(false);
9756 }
9757
9758 #[test]
9759 fn test_inconsistent_mpp_params() {
9760         // Test that if we receive two HTLCs with inconsistent payment parameters we fail back the
9761         // second (inconsistent) HTLC while allowing the first to stay pending.
9762         let chanmon_cfgs = create_chanmon_cfgs(4);
9763         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9764         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9765         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9766
9767         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
9768         create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
9769         create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
9770         let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);
9771
9772         let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
9773                 .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap();
9774         let mut route = get_route!(nodes[0], payment_params, 15_000_000).unwrap();
9775         assert_eq!(route.paths.len(), 2);
9776         route.paths.sort_by(|path_a, _| {
9777                 // Sort the path so that the path through nodes[1] comes first
9778                 if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
9779                         core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
9780         });
9781
9782         let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]);
9783
9784         let cur_height = nodes[0].best_block_info().1;
9785         let payment_id = PaymentId([42; 32]);
9786
9787         let session_privs = {
9788                 // We create a fake route here so that we start with three pending HTLCs, which we'll
9789                 // ultimately have, just not right away.
9790                 let mut dup_route = route.clone();
9791                 dup_route.paths.push(route.paths[1].clone());
9792                 nodes[0].node.test_add_new_pending_payment(our_payment_hash,
9793                         RecipientOnionFields::secret_only(our_payment_secret), payment_id, &dup_route).unwrap()
9794         };
9795         nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash,
9796                 RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id,
9797                 &None, session_privs[0]).unwrap();
9798         check_added_monitors!(nodes[0], 1);
9799
9800         {
9801                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9802                 assert_eq!(events.len(), 1);
9803                 pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), false, None);
9804         }
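        // nodes[3] shouldn't surface a PaymentClaimable event until all expected MPP parts arrive.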
9805         assert!(nodes[3].node.get_and_clear_pending_events().is_empty());
9806
9807         nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash,
9808                 RecipientOnionFields::secret_only(our_payment_secret), 14_000_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
9809         check_added_monitors!(nodes[0], 1);
9810
9811         {
9812                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9813                 assert_eq!(events.len(), 1);
9814                 let payment_event = SendEvent::from_event(events.pop().unwrap());
9815
9816                 nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9817                 commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false);
9818
9819                 expect_pending_htlcs_forwardable!(nodes[2]);
9820                 check_added_monitors!(nodes[2], 1);
9821
9822                 let mut events = nodes[2].node.get_and_clear_pending_msg_events();
9823                 assert_eq!(events.len(), 1);
9824                 let payment_event = SendEvent::from_event(events.pop().unwrap());
9825
9826                 nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
9827                 check_added_monitors!(nodes[3], 0);
9828                 commitment_signed_dance!(nodes[3], nodes[2], payment_event.commitment_msg, true, true);
9829
9830                 // At this point, nodes[3] should notice the two HTLCs don't contain the same total payment
9831                 // amount. It will assume the second is a privacy attack (no longer particularly relevant
9832                 // post-payment_secrets) and fail back the new HTLC.
9833         }
9834         expect_pending_htlcs_forwardable_ignore!(nodes[3]);
9835         nodes[3].node.process_pending_htlc_forwards();
9836         expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
9837         nodes[3].node.process_pending_htlc_forwards();
9838
9839         check_added_monitors!(nodes[3], 1);
9840
9841         let fail_updates_1 = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
9842         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9843         commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
9844
9845         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]);
9846         check_added_monitors!(nodes[2], 1);
9847
9848         let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
9849         nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_updates_2.update_fail_htlcs[0]);
9850         commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false);
9851
9852         expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
9853
9854         nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash,
9855                 RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id,
9856                 &None, session_privs[2]).unwrap();
9857         check_added_monitors!(nodes[0], 1);
9858
9859         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9860         assert_eq!(events.len(), 1);
9861         pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);
9862
9863         do_claim_payment_along_route(
9864                 ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], our_payment_preimage)
9865         );
9866         expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true, true);
9867 }
9868
9869 #[test]
9870 fn test_double_partial_claim() {
9871         // Test what happens if a node receives a payment, generates a PaymentClaimable event, the HTLCs
9872         // time out, the sender resends only some of the MPP parts, then the user processes the
9873         // PaymentClaimable event, ensuring they don't inadvertently claim only part of the full payment
9874         // amount.
9875         let chanmon_cfgs = create_chanmon_cfgs(4);
9876         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9877         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9878         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9879
9880         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
9881         create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
9882         create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
9883         create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);
9884
9885         let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000);
9886         assert_eq!(route.paths.len(), 2);
9887         route.paths.sort_by(|path_a, _| {
9888                 // Sort the path so that the path through nodes[1] comes first
9889                 if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
9890                         core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
9891         });
9892
9893         send_along_route_with_secret(&nodes[0], route.clone(), &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 15_000_000, payment_hash, payment_secret);
9894         // nodes[3] has now received a PaymentClaimable event...which it will take some (exorbitant)
9895         // amount of time to respond to.
9896
9897         // Connect some blocks to time out the payment
9898         connect_blocks(&nodes[3], TEST_FINAL_CLTV);
9899         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later
9900
9901         let failed_destinations = vec![
9902                 HTLCDestination::FailedPayment { payment_hash },
9903                 HTLCDestination::FailedPayment { payment_hash },
9904         ];
9905         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations);
9906
9907         pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash, PaymentFailureReason::RecipientRejected);
9908
9909         // nodes[0] now retries one of the two paths...
9910         nodes[0].node.send_payment_with_route(&route, payment_hash,
9911                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9912         check_added_monitors!(nodes[0], 2);
9913
9914         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9915         assert_eq!(events.len(), 2);
9916         let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
9917         pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, false, None);
9918
9919         // At this point nodes[3] has received one half of the payment, and the user goes to handle
9920         // that PaymentClaimable event they got hours ago and never handled...we should refuse to claim.
9921         nodes[3].node.claim_funds(payment_preimage);
9922         check_added_monitors!(nodes[3], 0);
9923         assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
9924 }
9925
9926 /// The possible events which may trigger a `max_dust_htlc_exposure` breach
9927 #[derive(Clone, Copy, PartialEq)]
9928 enum ExposureEvent {
9929         /// Breach occurs at HTLC forwarding (see `send_htlc`)
9930         AtHTLCForward,
9931         /// Breach occurs at HTLC reception (see `update_add_htlc`)
9932         AtHTLCReception,
9933         /// Breach occurs at outbound update_fee (see `send_update_fee`)
9934         AtUpdateFeeOutbound,
9935 }
9936
9937 fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool, multiplier_dust_limit: bool, apply_excess_fee: bool) {
9938         // Test that we properly reject dust HTLC violating our `max_dust_htlc_exposure_msat`
9939         // policy.
9940         //
9941         // At HTLC forward (`send_payment()`), if the sum of the trimmed-to-dust HTLC inbound and
9942         // trimmed-to-dust HTLC outbound balance and this new payment as included on next
9943         // counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll reject the
9944         // update. At HTLC reception (`update_add_htlc()`), if the sum of the trimmed-to-dust HTLC
9945         // inbound and trimmed-to-dust HTLC outbound balance and this new received HTLC as included
9946         // on next counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll fail
9947         // the update. Note, we return a `temporary_channel_failure` (0x1000 | 7), as the channel
9948         // might be available again for HTLC processing once the dust bandwidth has cleared up.
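        //
        // An HTLC counts as dust when its value can't cover both its claim-transaction fee at the
        // buffered dust feerate and the commitment's dust limit, i.e. roughly:
        //   value_msat < (dust_buffer_feerate * claim_tx_weight / 1000 + dust_limit_satoshis) * 1000
        // which is exactly how the per-HTLC thresholds below are derived.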
9949
9950         let chanmon_cfgs = create_chanmon_cfgs(2);
9951         let mut config = test_default_channel_config();
9952
9953         // We hard-code the feerate values here, but they're re-calculated further down and asserted.
9954         // If the values below ever change, these constants should simply be updated.
9955         const AT_FEE_OUTBOUND_HTLCS: u64 = 20;
9956         let nondust_htlc_count_in_limit =
9957                 if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
9958                         AT_FEE_OUTBOUND_HTLCS
9959                 } else { 0 };
9960         let initial_feerate = if apply_excess_fee { 253 * 2 } else { 253 };
9961         let expected_dust_buffer_feerate = initial_feerate + 2530;
9962         let mut commitment_tx_cost = commit_tx_fee_msat(initial_feerate - 253, nondust_htlc_count_in_limit, &ChannelTypeFeatures::empty());
9963         commitment_tx_cost +=
9964                 if on_holder_tx {
9965                         htlc_success_tx_weight(&ChannelTypeFeatures::empty())
9966                 } else {
9967                         htlc_timeout_tx_weight(&ChannelTypeFeatures::empty())
9968                 } * (initial_feerate as u64 - 253) / 1000 * nondust_htlc_count_in_limit;
9969         {
9970                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9971                 *feerate_lock = initial_feerate;
9972         }
9973         config.channel_config.max_dust_htlc_exposure = if multiplier_dust_limit {
9974                 // Default test fee estimator rate is 253 sat/kw, so we set the multiplier to 5_000_000 / 253
9975                 // to get roughly the same initial value as the default setting when this test was
9976                 // originally written.
9977                 MaxDustHTLCExposure::FeeRateMultiplier((5_000_000 + commitment_tx_cost) / 253)
9978         } else { MaxDustHTLCExposure::FixedLimitMsat(5_000_000 + commitment_tx_cost) };
9979         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9980         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
9981         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9982
9983         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
9984         let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9985         open_channel.common_fields.max_htlc_value_in_flight_msat = 50_000_000;
9986         open_channel.common_fields.max_accepted_htlcs = 60;
9987         if on_holder_tx {
9988                 open_channel.common_fields.dust_limit_satoshis = 546;
9989         }
9990         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
9991         let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9992         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
9993
9994         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9995
9996         let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
9997
9998         if on_holder_tx {
9999                 let mut node_0_per_peer_lock;
10000                 let mut node_0_peer_state_lock;
10001                 match get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id) {
10002                         ChannelPhase::UnfundedOutboundV1(chan) => {
10003                                 chan.context.holder_dust_limit_satoshis = 546;
10004                         },
10005                         _ => panic!("Unexpected ChannelPhase variant"),
10006                 }
10007         }
10008
10009         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
10010         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
10011         check_added_monitors!(nodes[1], 1);
10012         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
10013
10014         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
10015         check_added_monitors!(nodes[0], 1);
10016         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
10017
10018         let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
10019         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
10020         update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
10021
10022         {
10023                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10024                 *feerate_lock = 253;
10025         }
10026
10027         // Fetch a route in advance, as we won't be able to find one once we're unable to send.
10028         let (mut route, payment_hash, _, payment_secret) =
10029                 get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
10030
10031         let (dust_buffer_feerate, max_dust_htlc_exposure_msat) = {
10032                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
10033                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
10034                 let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
10035                 (chan.context().get_dust_buffer_feerate(None) as u64,
10036                 chan.context().get_max_dust_htlc_exposure_msat(253))
10037         };
10038         assert_eq!(dust_buffer_feerate, expected_dust_buffer_feerate as u64);
10039         let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - 1) * 1000;
10040         let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
10041
10042         // Subtract 3 sats for the multiplier and 2 sats for the fixed limit to make sure we are 50% below the dust limit.
10043         // This is to make sure we fully use the dust limit. If we don't, we could end up with `dust_inbound_htlc_on_holder_tx` being 1
10044         // while `max_dust_htlc_exposure_msat` is not equal to `dust_outbound_htlc_on_holder_tx_msat`.
10045         let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000;
10046         let dust_inbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
10047
10048         // This test was written with a fixed dust value here, which we retain, but assert that it is,
10049         // indeed, dust on both transactions.
10050         let dust_htlc_on_counterparty_tx: u64 = 4;
10051         let dust_htlc_on_counterparty_tx_msat: u64 = 1_250_000;
10052         let calcd_dust_htlc_on_counterparty_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000;
10053         assert!(dust_htlc_on_counterparty_tx_msat < dust_inbound_htlc_on_holder_tx_msat);
10054         assert!(dust_htlc_on_counterparty_tx_msat < calcd_dust_htlc_on_counterparty_tx_msat);
10055
10056         if on_holder_tx {
10057                 if dust_outbound_balance {
10058                         // Outbound dust threshold: 2223 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
10059                         // Outbound dust balance: 4372 sats
10060                         // Note, we need the sent payment to be above the outbound dust threshold on the counterparty tx, 2132 sats
10061                         for _ in 0..dust_outbound_htlc_on_holder_tx {
10062                                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_outbound_htlc_on_holder_tx_msat);
10063                                 nodes[0].node.send_payment_with_route(&route, payment_hash,
10064                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10065                         }
10066                 } else {
10067                         // Inbound dust threshold: 2324 sats (`dust_buffer_feerate` * HTLC_SUCCESS_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
10068                         // Inbound dust balance: 4372 sats
10069                         // Note, we need the sent payment to be above the outbound dust threshold on the counterparty tx, 2031 sats
10070                         for _ in 0..dust_inbound_htlc_on_holder_tx {
10071                                 route_payment(&nodes[1], &[&nodes[0]], dust_inbound_htlc_on_holder_tx_msat);
10072                         }
10073                 }
10074         } else {
10075                 if dust_outbound_balance {
10076                         // Outbound dust threshold: 2132 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
10077                         // Outbound dust balance: 5000 sats
10078                         for _ in 0..dust_htlc_on_counterparty_tx - 1 {
10079                                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_htlc_on_counterparty_tx_msat);
10080                                 nodes[0].node.send_payment_with_route(&route, payment_hash,
10081                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10082                         }
10083                 } else {
10084                         // Inbound dust threshold: 2031 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
10085                         // Inbound dust balance: 5000 sats
10086                         for _ in 0..dust_htlc_on_counterparty_tx - 1 {
10087                                 route_payment(&nodes[1], &[&nodes[0]], dust_htlc_on_counterparty_tx_msat);
10088                         }
10089                 }
10090         }
10091
10092         if exposure_breach_event == ExposureEvent::AtHTLCForward {
10093                 route.paths[0].hops.last_mut().unwrap().fee_msat =
10094                         if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 };
10095                 // With default dust exposure: 5000 sats
10096                 if on_holder_tx {
10097                         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
10098                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
10099                                 ), true, APIError::ChannelUnavailable { .. }, {});
10100                 } else {
10101                         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
10102                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
10103                                 ), true, APIError::ChannelUnavailable { .. }, {});
10104                 }
10105         } else if exposure_breach_event == ExposureEvent::AtHTLCReception {
10106                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 4 });
10107                 nodes[1].node.send_payment_with_route(&route, payment_hash,
10108                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10109                 check_added_monitors!(nodes[1], 1);
10110                 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
10111                 assert_eq!(events.len(), 1);
10112                 let payment_event = SendEvent::from_event(events.remove(0));
10113                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
10114                 // With default dust exposure: 5000 sats
10115                 if on_holder_tx {
10116                         // Outbound dust balance: 6399 sats
10117                         let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
10118                         let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
10119                         nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
10120                 } else {
10121                         // Outbound dust balance: 5200 sats
10122                         nodes[0].logger.assert_log("lightning::ln::channel",
10123                                 format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
10124                                         dust_htlc_on_counterparty_tx_msat * dust_htlc_on_counterparty_tx + commitment_tx_cost + 4,
10125                                         max_dust_htlc_exposure_msat), 1);
10126                 }
10127         } else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
10128                 route.paths[0].hops.last_mut().unwrap().fee_msat = 2_500_000;
10129                 // For the multiplier dust exposure limit, since it scales with feerate,
10130                 // we need to add a lot of HTLCs that will become dust at the new feerate
10131                 // to cross the threshold.
10132                 for _ in 0..AT_FEE_OUTBOUND_HTLCS {
10133                         let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(1_000), None);
10134                         nodes[0].node.send_payment_with_route(&route, payment_hash,
10135                                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10136                 }
10137                 {
10138                         let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10139                         *feerate_lock *= 10;
10140                 }
10141                 nodes[0].node.timer_tick_occurred();
10142                 check_added_monitors!(nodes[0], 1);
10143                 nodes[0].logger.assert_log_contains("lightning::ln::channel", "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure", 1);
10144         }
10145
10146         let _ = nodes[0].node.get_and_clear_pending_msg_events();
10147         let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
10148         added_monitors.clear();
10149 }
10150
10151 fn do_test_max_dust_htlc_exposure_by_threshold_type(multiplier_dust_limit: bool, apply_excess_fee: bool) {
10152         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit, apply_excess_fee);
10153         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit, apply_excess_fee);
10154         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit, apply_excess_fee);
10155         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit, apply_excess_fee);
10156         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit, apply_excess_fee);
10157         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit, apply_excess_fee);
10158         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit, apply_excess_fee);
10159         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit, apply_excess_fee);
10160         if !multiplier_dust_limit && !apply_excess_fee {
10161                 // Because non-dust HTLC transaction fees are included in the dust exposure, trying to
10162                 // increase the fee to hit a higher dust exposure with a
10163                 // `MaxDustHTLCExposure::FeeRateMultiplier` is no longer super practical, so we skip these
10164                 // in the `multiplier_dust_limit` and `apply_excess_fee` cases.
10165                 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit, apply_excess_fee);
10166                 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit, apply_excess_fee);
10167                 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit, apply_excess_fee);
10168                 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit, apply_excess_fee);
10169         }
10170 }
10171
10172 #[test]
10173 fn test_max_dust_htlc_exposure() {
10174         do_test_max_dust_htlc_exposure_by_threshold_type(false, false);
10175         do_test_max_dust_htlc_exposure_by_threshold_type(false, true);
10176         do_test_max_dust_htlc_exposure_by_threshold_type(true, false);
10177         do_test_max_dust_htlc_exposure_by_threshold_type(true, true);
10178 }
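
// A minimal configuration sketch (not part of the original suite) showing how a user opts
// into the two exposure policies exercised above. The numeric values are illustrative
// assumptions, not recommendations.
#[allow(dead_code)]
fn example_dust_exposure_configs() -> (UserConfig, UserConfig) {
        let mut fixed = test_default_channel_config();
        // Cap total dust exposure at a fixed 5_000_000 msat, independent of feerate.
        fixed.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FixedLimitMsat(5_000_000);
        let mut scaled = test_default_channel_config();
        // Or scale the cap with the feerate estimate (here 10_000 msat per sat/KW), so the
        // exposure tracks what we could actually lose to fees on a force-close.
        scaled.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(10_000);
        (fixed, scaled)
}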
10179
10180 #[test]
10181 fn test_nondust_htlc_fees_are_dust() {
10182         // Test that the transaction fees paid in nondust HTLCs count towards our dust limit
10183         let chanmon_cfgs = create_chanmon_cfgs(3);
10184         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10185
10186         let mut config = test_default_channel_config();
10187         // Set the dust exposure limit to the default value
10188         config.channel_config.max_dust_htlc_exposure =
10189                 MaxDustHTLCExposure::FeeRateMultiplier(10_000);
10190         // Make sure the HTLC limits don't get in the way
10191         config.channel_handshake_limits.min_max_accepted_htlcs = 400;
10192         config.channel_handshake_config.our_max_accepted_htlcs = 400;
10193         config.channel_handshake_config.our_htlc_minimum_msat = 1;
10194
10195         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config), Some(config), Some(config)]);
10196         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10197
10198         // Create a channel from 1 -> 0 but immediately push all of the funds towards 0
10199         let chan_id_1 = create_announced_chan_between_nodes(&nodes, 1, 0).2;
10200         while nodes[1].node.list_channels()[0].next_outbound_htlc_limit_msat > 0 {
10201                 send_payment(&nodes[1], &[&nodes[0]], nodes[1].node.list_channels()[0].next_outbound_htlc_limit_msat);
10202         }
10203
10204         // First get the channel one HTLC_VALUE HTLC away from the dust limit by sending dust HTLCs
10205         // repeatedly until we run out of space.
10206         const HTLC_VALUE: u64 = 1_000_000; // Doesn't matter, tune until the test passes
10207         let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], HTLC_VALUE).0;
10208
10209         while nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat == 0 {
10210                 route_payment(&nodes[0], &[&nodes[1]], HTLC_VALUE);
10211         }
10212         assert_ne!(nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat, 0,
10213                 "We don't want to run out of ability to send because of some non-dust limit");
10214         assert!(nodes[0].node.list_channels()[0].pending_outbound_htlcs.len() < 10,
10215                 "We should be able to fill our dust limit without too many HTLCs");
10216
10217         let dust_limit = nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat;
10218         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
10219         assert_ne!(nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat, 0,
10220                 "Make sure we are able to send once we clear one HTLC");
10221
10222         // At this point we have somewhere between dust_limit and dust_limit * 2 left in our dust
10223         // exposure limit, and we want to max that out using non-dust HTLCs.
10224         let commitment_tx_per_htlc_cost =
10225                 htlc_success_tx_weight(&ChannelTypeFeatures::empty()) * 253;
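        // (`weight * feerate-per-kw` yields the fee in msat directly, since fee_sat = weight * feerate / 1000.)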
10226         let max_htlcs_remaining = dust_limit * 2 / commitment_tx_per_htlc_cost;
10227         assert!(max_htlcs_remaining < 30,
10228                 "We should be able to fill our dust limit without too many HTLCs");
10229         for i in 0..max_htlcs_remaining + 1 {
10230                 assert_ne!(i, max_htlcs_remaining);
10231                 if nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat < dust_limit {
10232                         // We found our limit, and it was less than max_htlcs_remaining!
10233                         // At this point we can only send dust HTLCs as any non-dust HTLCs will overuse our
10234                         // remaining dust exposure.
10235                         break;
10236                 }
10237                 route_payment(&nodes[0], &[&nodes[1]], dust_limit * 2);
10238         }
10239
10240         // At this point non-dust HTLCs are no longer accepted from node 0 -> 1, we also check that
10241         // such HTLCs can't be routed over the same channel either.
10242         create_announced_chan_between_nodes(&nodes, 2, 0);
10243         let (route, payment_hash, _, payment_secret) =
10244                 get_route_and_payment_hash!(nodes[2], nodes[1], dust_limit * 2);
10245         let onion = RecipientOnionFields::secret_only(payment_secret);
10246         nodes[2].node.send_payment_with_route(&route, payment_hash, onion, PaymentId([0; 32])).unwrap();
10247         check_added_monitors(&nodes[2], 1);
10248         let send = SendEvent::from_node(&nodes[2]);
10249
10250         nodes[0].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send.msgs[0]);
10251         commitment_signed_dance!(nodes[0], nodes[2], send.commitment_msg, false, true);
10252
10253         expect_pending_htlcs_forwardable!(nodes[0]);
10254         check_added_monitors(&nodes[0], 1);
10255         let node_id_1 = nodes[1].node.get_our_node_id();
10256         expect_htlc_handling_failed_destinations!(
10257                 nodes[0].node.get_and_clear_pending_events(),
10258                 &[HTLCDestination::NextHopChannel { node_id: Some(node_id_1), channel_id: chan_id_1 }]
10259         );
10260
10261         let fail = get_htlc_update_msgs(&nodes[0], &nodes[2].node.get_our_node_id());
10262         nodes[2].node.handle_update_fail_htlc(&nodes[0].node.get_our_node_id(), &fail.update_fail_htlcs[0]);
10263         commitment_signed_dance!(nodes[2], nodes[0], fail.commitment_signed, false);
10264         expect_payment_failed_conditions(&nodes[2], payment_hash, false, PaymentFailedConditions::new());
10265 }
10266
10267
10268 #[test]
10269 fn test_non_final_funding_tx() {
10270         let chanmon_cfgs = create_chanmon_cfgs(2);
10271         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10272         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10273         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10274
10275         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10276         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10277         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10278         let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10279         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10280
10281         let best_height = nodes[0].node.best_block.read().unwrap().height;
10282
10283         let chan_id = *nodes[0].network_chan_count.borrow();
10284         let events = nodes[0].node.get_and_clear_pending_events();
10285         let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::ScriptBuf::new(), sequence: Sequence(1), witness: Witness::from_slice(&[&[1]]) };
10286         assert_eq!(events.len(), 1);
10287         let tx = match events[0] {
10288                 Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
10289                         // Timelock the transaction _beyond_ the best client height + 1.
10290                         Transaction { version: Version(chan_id as i32), lock_time: LockTime::from_height(best_height + 2).unwrap(), input: vec![input], output: vec![TxOut {
10291                                 value: Amount::from_sat(*channel_value_satoshis), script_pubkey: output_script.clone(),
10292                         }]}
10293                 },
10294                 _ => panic!("Unexpected event"),
10295         };
10296         // Transaction should fail as it's evaluated as non-final for propagation.
10297         match nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()) {
10298                 Err(APIError::APIMisuseError { err }) => {
10299                         assert_eq!(format!("Funding transaction absolute timelock is non-final"), err);
10300                 },
10301                 _ => panic!()
10302         }
10303         let err = "Error in transaction funding: Misuse error: Funding transaction absolute timelock is non-final".to_owned();
10304         check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(temp_channel_id, false, ClosureReason::ProcessingError { err })]);
10305         assert_eq!(get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id()).data, "Failed to fund channel");
10306 }
10307
10308 #[test]
10309 fn test_non_final_funding_tx_within_headroom() {
10310         let chanmon_cfgs = create_chanmon_cfgs(2);
10311         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10312         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10313         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10314
10315         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10316         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10317         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10318         let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10319         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10320
10321         let best_height = nodes[0].node.best_block.read().unwrap().height;
10322
10323         let chan_id = *nodes[0].network_chan_count.borrow();
10324         let events = nodes[0].node.get_and_clear_pending_events();
10325         let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::ScriptBuf::new(), sequence: Sequence(1), witness: Witness::from_slice(&[[1]]) };
10326         assert_eq!(events.len(), 1);
10327         let tx = match events[0] {
10328                 Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
10329                         // Timelock the transaction within a +1 headroom from the best block.
10330                         Transaction { version: Version(chan_id as i32), lock_time: LockTime::from_consensus(best_height + 1), input: vec![input], output: vec![TxOut {
10331                                 value: Amount::from_sat(*channel_value_satoshis), script_pubkey: output_script.clone(),
10332                         }]}
10333                 },
10334                 _ => panic!("Unexpected event"),
10335         };
10336
10337         // Transaction should be accepted if it's in a +1 headroom from best block.
10338         assert!(nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
10339         get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
10340 }
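
// A minimal sketch (not part of the original suite) of the finality rule the two tests above
// exercise: a height-based locktime is acceptable for funding iff it can confirm in the very
// next block (i.e. lock height <= best height + 1). Time-based locktimes aren't exercised here.
#[allow(dead_code)]
fn example_funding_locktime_is_final(lock_time: LockTime, best_height: u32) -> bool {
        match lock_time {
                LockTime::Blocks(height) => height.to_consensus_u32() <= best_height + 1,
                // The tests above only use height locks; treat time locks as non-final for simplicity.
                LockTime::Seconds(_) => false,
        }
}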
10341
10342 #[test]
10343 fn accept_busted_but_better_fee() {
10344         // If a peer sends us a fee update that is too low, but higher than our previous channel
10345         // feerate, we should accept it. In the future we may want to close the channel in
10346         // response, but for now we simply accept the update.
10347         let mut chanmon_cfgs = create_chanmon_cfgs(2);
10348         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10349         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10350         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10351
10352         create_chan_between_nodes(&nodes[0], &nodes[1]);
10353
10354         // Set nodes[1] to expect 5,000 sat/kW.
10355         {
10356                 let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
10357                 *feerate_lock = 5000;
10358         }
10359
10360         // If nodes[0] increases their feerate, even if it's not enough, nodes[1] should accept it.
10361         {
10362                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10363                 *feerate_lock = 1000;
10364         }
10365         nodes[0].node.timer_tick_occurred();
10366         check_added_monitors!(nodes[0], 1);
10367
10368         let events = nodes[0].node.get_and_clear_pending_msg_events();
10369         assert_eq!(events.len(), 1);
10370         match events[0] {
10371                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
10372                         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
10373                         commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
10374                 },
10375                 _ => panic!("Unexpected event"),
10376         };
10377
10378         // If nodes[0] increases their feerate further, even if it's not enough, nodes[1] should
10379         // accept it.
10380         {
10381                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10382                 *feerate_lock = 2000;
10383         }
10384         nodes[0].node.timer_tick_occurred();
10385         check_added_monitors!(nodes[0], 1);
10386
10387         let events = nodes[0].node.get_and_clear_pending_msg_events();
10388         assert_eq!(events.len(), 1);
10389         match events[0] {
10390                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
10391                         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
10392                         commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
10393                 },
10394                 _ => panic!("Unexpected event"),
10395         };
10396
10397         // However, if nodes[0] decreases their feerate, nodes[1] should reject it and close the
10398         // channel.
10399         {
10400                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10401                 *feerate_lock = 1000;
10402         }
10403         nodes[0].node.timer_tick_occurred();
10404         check_added_monitors!(nodes[0], 1);
10405
10406         let events = nodes[0].node.get_and_clear_pending_msg_events();
10407         assert_eq!(events.len(), 1);
10408         match events[0] {
10409                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
10410                         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
10411                         check_closed_event!(nodes[1], 1, ClosureReason::PeerFeerateTooLow {
10412                                 peer_feerate_sat_per_kw: 1000, required_feerate_sat_per_kw: 5000,
10413                         }, [nodes[0].node.get_our_node_id()], 100000);
10414                         check_closed_broadcast!(nodes[1], true);
10415                         check_added_monitors!(nodes[1], 1);
10416                 },
10417                 _ => panic!("Unexpected event"),
10418         };
10419 }
10420
10421 fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash: bool) {
10422         let mut chanmon_cfgs = create_chanmon_cfgs(2);
10423         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10424         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10425         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10426         let min_final_cltv_expiry_delta = 120;
10427         let final_cltv_expiry_delta = if valid_delta { min_final_cltv_expiry_delta + 2 } else {
10428                 min_final_cltv_expiry_delta - 2 };
10429         let recv_value = 100_000;
10430
10431         create_chan_between_nodes(&nodes[0], &nodes[1]);
10432
10433         let payment_parameters = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), final_cltv_expiry_delta as u32);
10434         let (payment_hash, payment_preimage, payment_secret) = if use_user_hash {
10435                 let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1],
10436                         Some(recv_value), Some(min_final_cltv_expiry_delta));
10437                 (payment_hash, payment_preimage, payment_secret)
10438         } else {
10439                 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(recv_value), 7200, Some(min_final_cltv_expiry_delta)).unwrap();
10440                 (payment_hash, nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap(), payment_secret)
10441         };
10442         let route = get_route!(nodes[0], payment_parameters, recv_value).unwrap();
10443         nodes[0].node.send_payment_with_route(&route, payment_hash,
10444                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10445         check_added_monitors!(nodes[0], 1);
10446         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
10447         assert_eq!(events.len(), 1);
10448         let mut payment_event = SendEvent::from_event(events.pop().unwrap());
10449         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
10450         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
10451         expect_pending_htlcs_forwardable!(nodes[1]);
10452
10453         if valid_delta {
10454                 expect_payment_claimable!(nodes[1], payment_hash, payment_secret, recv_value, if use_user_hash {
10455                         None } else { Some(payment_preimage) }, nodes[1].node.get_our_node_id());
10456
10457                 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
10458         } else {
10459                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
10460
10461                 check_added_monitors!(nodes[1], 1);
10462
10463                 let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
10464                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates.update_fail_htlcs[0]);
10465                 commitment_signed_dance!(nodes[0], nodes[1], fail_updates.commitment_signed, false, true);
10466
10467                 expect_payment_failed!(nodes[0], payment_hash, true);
10468         }
10469 }
10470
10471 #[test]
10472 fn test_payment_with_custom_min_cltv_expiry_delta() {
10473         do_payment_with_custom_min_final_cltv_expiry(false, false);
10474         do_payment_with_custom_min_final_cltv_expiry(false, true);
10475         do_payment_with_custom_min_final_cltv_expiry(true, false);
10476         do_payment_with_custom_min_final_cltv_expiry(true, true);
10477 }
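
// A minimal usage sketch (not part of the original suite): registering an invoice with a
// custom `min_final_cltv_expiry_delta`, mirroring the LDK-hash path above. The amount,
// 7200-second invoice expiry, and 120-block delta are the illustrative values used above.
#[allow(dead_code)]
fn example_register_custom_cltv_invoice(node: &Node<'_, '_, '_>) -> (PaymentHash, PaymentSecret) {
        node.node.create_inbound_payment(Some(100_000), 7200, Some(120)).unwrap()
}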
10478
10479 #[test]
10480 fn test_disconnects_peer_awaiting_response_ticks() {
10481         // Tests that nodes which are awaiting a response critical for channel responsiveness
10482         // disconnect their counterparty after `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10483         let mut chanmon_cfgs = create_chanmon_cfgs(2);
10484         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10485         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10486         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10487
10488         // Asserts a disconnect event is queued to the user.
10489         let check_disconnect_event = |node: &Node, should_disconnect: bool| {
10490                 let disconnect_event = node.node.get_and_clear_pending_msg_events().iter().find_map(|event|
10491                         if let MessageSendEvent::HandleError { action, .. } = event {
10492                                 if let msgs::ErrorAction::DisconnectPeerWithWarning { .. } = action {
10493                                         Some(())
10494                                 } else {
10495                                         None
10496                                 }
10497                         } else {
10498                                 None
10499                         }
10500                 );
10501                 assert_eq!(disconnect_event.is_some(), should_disconnect);
10502         };
10503
10504         // Fires timer ticks ensuring we only attempt to disconnect peers after reaching
10505         // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10506         let check_disconnect = |node: &Node| {
10507                 // No disconnect without any timer ticks.
10508                 check_disconnect_event(node, false);
10509
10510                 // No disconnect with 1 timer tick less than required.
10511                 for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS - 1 {
10512                         node.node.timer_tick_occurred();
10513                         check_disconnect_event(node, false);
10514                 }
10515
10516                 // Disconnect after reaching the required ticks.
10517                 node.node.timer_tick_occurred();
10518                 check_disconnect_event(node, true);
10519
10520                 // Disconnect again on the next tick if the peer hasn't been disconnected yet.
10521                 node.node.timer_tick_occurred();
10522                 check_disconnect_event(node, true);
10523         };
10524
10525         create_chan_between_nodes(&nodes[0], &nodes[1]);
10526
10527         // We'll start by performing a fee update with Alice (nodes[0]) on the channel.
10528         *nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2;
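        // Doubling the fee estimate causes Alice's next timer tick to queue an update_fee,
        // kicking off the commitment dance below.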
10529         nodes[0].node.timer_tick_occurred();
10530         check_added_monitors!(&nodes[0], 1);
10531         let alice_fee_update = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
10532         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), alice_fee_update.update_fee.as_ref().unwrap());
10533         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &alice_fee_update.commitment_signed);
10534         check_added_monitors!(&nodes[1], 1);
10535
10536         // This will prompt Bob (nodes[1]) to respond with his `CommitmentSigned` and `RevokeAndACK`.
10537         let (bob_revoke_and_ack, bob_commitment_signed) = get_revoke_commit_msgs!(&nodes[1], nodes[0].node.get_our_node_id());
10538         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revoke_and_ack);
10539         check_added_monitors!(&nodes[0], 1);
10540         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_commitment_signed);
10541         check_added_monitors(&nodes[0], 1);
10542
10543         // Alice then needs to send her final `RevokeAndACK` to complete the commitment dance. We
10544         // pretend Bob hasn't received the message and check whether he'll disconnect Alice after
10545         // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10546         let alice_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
10547         check_disconnect(&nodes[1]);
10548
10549         // Now, we'll reconnect them to test awaiting a `ChannelReestablish` message.
10550         //
10551         // Note that since the commitment dance didn't complete above, Alice is expected to resend her
10552         // final `RevokeAndACK` to Bob to complete it.
10553         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
10554         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10555         let bob_init = msgs::Init {
10556                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
10557         };
10558         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &bob_init, true).unwrap();
10559         let alice_init = msgs::Init {
10560                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
10561         };
10562         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &alice_init, true).unwrap();
10563
10564         // Upon reconnection, Alice sends her `ChannelReestablish` to Bob. Alice, however, hasn't
10565         // received Bob's yet, so she should disconnect him after reaching
10566         // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10567         let alice_channel_reestablish = get_event_msg!(
10568                 nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()
10569         );
10570         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &alice_channel_reestablish);
10571         check_disconnect(&nodes[0]);
10572
10573         // Bob now sends his `ChannelReestablish` to Alice to resume the channel and consider it "live".
10574         let bob_channel_reestablish = nodes[1].node.get_and_clear_pending_msg_events().iter().find_map(|event|
10575                 if let MessageSendEvent::SendChannelReestablish { node_id, msg } = event {
10576                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
10577                         Some(msg.clone())
10578                 } else {
10579                         None
10580                 }
10581         ).unwrap();
10582         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bob_channel_reestablish);
10583
10584         // Sanity check that Alice won't disconnect Bob since she's no longer waiting for any messages.
10585         for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
10586                 nodes[0].node.timer_tick_occurred();
10587                 check_disconnect_event(&nodes[0], false);
10588         }
10589
10590         // However, Bob is still waiting on Alice's `RevokeAndACK`, so he should disconnect her after
10591         // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10592         check_disconnect(&nodes[1]);
10593
10594         // Finally, have Bob process the last message.
10595         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &alice_revoke_and_ack);
10596         check_added_monitors(&nodes[1], 1);
10597
10598         // At this point, neither node should attempt to disconnect the other, since neither is
10599         // waiting on any messages.
10600         for node in &nodes {
10601                 for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
10602                         node.node.timer_tick_occurred();
10603                         check_disconnect_event(node, false);
10604                 }
10605         }
10606 }
10607
10608 #[test]
10609 fn test_remove_expired_outbound_unfunded_channels() {
10610         let chanmon_cfgs = create_chanmon_cfgs(2);
10611         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10612         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10613         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10614
10615         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10616         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10617         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10618         let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10619         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10620
10621         let events = nodes[0].node.get_and_clear_pending_events();
10622         assert_eq!(events.len(), 1);
10623         match events[0] {
10624                 Event::FundingGenerationReady { .. } => (),
10625                 _ => panic!("Unexpected event"),
10626         };
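        // Note that we never provide the funding transaction, so the channel remains unfunded.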
10627
10628         // Asserts that the outbound channel has been removed from nodes[0]'s peer state map.
10629         let check_outbound_channel_existence = |should_exist: bool| {
10630                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
10631                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
10632                 assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
10633         };
10634
10635         // Channel should exist without any timer ticks.
10636         check_outbound_channel_existence(true);
10637
10638         // Channel should exist with 1 timer tick less than required.
10639         for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
10640                 nodes[0].node.timer_tick_occurred();
10641                 check_outbound_channel_existence(true);
10642         }
10643
10644         // Remove channel after reaching the required ticks.
10645         nodes[0].node.timer_tick_occurred();
10646         check_outbound_channel_existence(false);
10647
10648         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
10649         assert_eq!(msg_events.len(), 1);
10650         match msg_events[0] {
10651                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
10652                         assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
10653                 },
10654                 _ => panic!("Unexpected event"),
10655         }
10656         check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, false, &[nodes[1].node.get_our_node_id()], 100000);
10657 }
10658
10659 #[test]
10660 fn test_remove_expired_inbound_unfunded_channels() {
10661         let chanmon_cfgs = create_chanmon_cfgs(2);
10662         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10663         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10664         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10665
10666         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10667         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10668         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10669         let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10670         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10671
10672         let events = nodes[0].node.get_and_clear_pending_events();
10673         assert_eq!(events.len(), 1);
10674         match events[0] {
10675                 Event::FundingGenerationReady { .. } => (),
10676                 _ => panic!("Unexpected event"),
10677         };
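        // Note that we never provide the funding transaction, so the channel remains unfunded.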
10678
10679         // Asserts that the inbound channel has been removed from nodes[1]'s peer state map.
10680         let check_inbound_channel_existence = |should_exist: bool| {
10681                 let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
10682                 let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
10683                 assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
10684         };
10685
10686         // Channel should exist without any timer ticks.
10687         check_inbound_channel_existence(true);
10688
10689         // Channel should exist with 1 timer tick less than required.
10690         for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
10691                 nodes[1].node.timer_tick_occurred();
10692                 check_inbound_channel_existence(true);
10693         }
10694
10695         // Remove channel after reaching the required ticks.
10696         nodes[1].node.timer_tick_occurred();
10697         check_inbound_channel_existence(false);
10698
10699         let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
10700         assert_eq!(msg_events.len(), 1);
10701         match msg_events[0] {
10702                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
10703                         assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
10704                 },
10705                 _ => panic!("Unexpected event"),
10706         }
10707         check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, false, &[nodes[0].node.get_our_node_id()], 100000);
10708 }
10709
10710 #[test]
10711 fn test_channel_close_when_not_timely_accepted() {
10712         // Create network of two nodes
10713         let chanmon_cfgs = create_chanmon_cfgs(2);
10714         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10715         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10716         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10717
10718         // Simulate a peer disconnect mid-handshake.
10719         // The channel is initiated from the nodes[0] side,
10720         // but the nodes disconnect before nodes[1] can send its accept_channel.
10721         let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
10722         let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10723         assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
10724
10725         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
10726         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10727
10728         // Make sure that we have not removed the OutboundV1Channel from nodes[0] immediately.
10729         assert_eq!(nodes[0].node.list_channels().len(), 1);
10730
10731         // Since the channel was inbound from nodes[1]'s perspective, it should have been dropped immediately.
10732         assert_eq!(nodes[1].node.list_channels().len(), 0);
10733
10734         // In the meantime, some time passes.
10735         for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
10736                 nodes[0].node.timer_tick_occurred();
10737         }
10738
10739         // Since we disconnected from the peer and did not reconnect within time,
10740         // we should have force-closed the channel by now.
10741         check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, [nodes[1].node.get_our_node_id()], 100000);
10742         assert_eq!(nodes[0].node.list_channels().len(), 0);
10743
10744         {
10745                 // Since the accept_channel message was never received,
10746                 // the channel should have been force-closed by now from the nodes[0] side,
10747                 // and the peer removed from per_peer_state.
10748                 let node_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
10749                 assert_eq!(node_0_per_peer_state.len(), 0);
10750         }
10751 }
10752
10753 #[test]
10754 fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() {
10755         // Create network of two nodes
10756         let chanmon_cfgs = create_chanmon_cfgs(2);
10757         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10758         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10759         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10760
10761         // Simulate a peer disconnect mid-handshake.
10762         // The channel is initiated from the nodes[0] side,
10763         // but the nodes disconnect before nodes[1] can send its accept_channel.
10764         let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
10765         let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10766         assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
10767
10768         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
10769         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10770
10771         // Make sure that we have not removed the OutboundV1Channel from nodes[0] immediately.
10772         assert_eq!(nodes[0].node.list_channels().len(), 1);
10773
10774         // Since the channel was inbound from nodes[1]'s perspective, it should have been immediately dropped.
10775         assert_eq!(nodes[1].node.list_channels().len(), 0);
10776
10777         // The peers now reconnect
10778         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
10779                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
10780         }, true).unwrap();
10781         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
10782                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
10783         }, false).unwrap();
10784
10785         // Make sure the SendOpenChannel message is added to nodes[0]'s pending message events
10786         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
10787         assert_eq!(msg_events.len(), 1);
10788         match &msg_events[0] {
10789                 MessageSendEvent::SendOpenChannel { msg, .. } => assert_eq!(msg, &open_channel_msg),
10790                 _ => panic!("Unexpected message."),
10791         }
10792 }
10793
10794 fn do_test_multi_post_event_actions(do_reload: bool) {
10795         // Tests handling multiple post-Event actions at once.
10796         // There is specific code in ChannelManager to handle channels where multiple post-Event
10797         // `ChannelMonitorUpdates` are pending at once. This test exercises that code.
10798         //
10799         // Specifically, we test calling `get_and_clear_pending_events` while there are two
10800         // `PaymentSent` events from different channels and one channel has two pending
10801         // `ChannelMonitorUpdate`s - one from an RAA and one from an inbound commitment_signed.
10802         let chanmon_cfgs = create_chanmon_cfgs(3);
10803         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10804         let (persister, chain_monitor);
10805         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
10806         let nodes_0_deserialized;
10807         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10808
10809         let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
10810         let chan_id_2 = create_announced_chan_between_nodes(&nodes, 0, 2).2;
10811
10812         send_payment(&nodes[0], &[&nodes[1]], 1_000_000);
10813         send_payment(&nodes[0], &[&nodes[2]], 1_000_000);
10814
10815         let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
10816         let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000);
10817
10818         nodes[1].node.claim_funds(our_payment_preimage);
10819         check_added_monitors!(nodes[1], 1);
10820         expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);
10821
10822         nodes[2].node.claim_funds(payment_preimage_2);
10823         check_added_monitors!(nodes[2], 1);
10824         expect_payment_claimed!(nodes[2], payment_hash_2, 1_000_000);
10825
10826         for dest in &[1, 2] {
10827                 let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[*dest], nodes[0].node.get_our_node_id());
10828                 nodes[0].node.handle_update_fulfill_htlc(&nodes[*dest].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
10829                 commitment_signed_dance!(nodes[0], nodes[*dest], htlc_fulfill_updates.commitment_signed, false);
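                // The `ChannelMonitorUpdate`s generated by the claim are held back until the
                // corresponding `PaymentSent` events are processed, so none should be added yet.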
10830                 check_added_monitors(&nodes[0], 0);
10831         }
10832
10833         let (route, payment_hash_3, _, payment_secret_3) =
10834                 get_route_and_payment_hash!(nodes[1], nodes[0], 100_000);
10835         let payment_id = PaymentId(payment_hash_3.0);
10836         nodes[1].node.send_payment_with_route(&route, payment_hash_3,
10837                 RecipientOnionFields::secret_only(payment_secret_3), payment_id).unwrap();
10838         check_added_monitors(&nodes[1], 1);
10839
10840         let send_event = SendEvent::from_node(&nodes[1]);
10841         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
10842         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event.commitment_msg);
10843         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
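        // nodes[0] can't respond with its RAA + CS yet; the required monitor updates remain
        // blocked until the pending events below have been processed.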
10844
10845         if do_reload {
10846                 let nodes_0_serialized = nodes[0].node.encode();
10847                 let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
10848                 let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_2).encode();
10849                 reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, chain_monitor, nodes_0_deserialized);
10850
10851                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10852                 nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10853
10854                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
10855                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[2]));
10856         }
10857
10858         let events = nodes[0].node.get_and_clear_pending_events();
10859         assert_eq!(events.len(), 4);
10860         if let Event::PaymentSent { payment_preimage, .. } = events[0] {
10861                 assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
10862         } else { panic!(); }
10863         if let Event::PaymentSent { payment_preimage, .. } = events[1] {
10864                 assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
10865         } else { panic!(); }
10866         if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
10867         if let Event::PaymentPathSuccessful { .. } = events[3] {} else { panic!(); }
10868
10869         // After the events are processed, the ChannelMonitorUpdates will be released and, upon their
10870         // completion, we'll respond to nodes[1] with an RAA + CS.
10871         get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
10872         check_added_monitors(&nodes[0], 3);
10873 }
10874
10875 #[test]
10876 fn test_multi_post_event_actions() {
10877         do_test_multi_post_event_actions(true);
10878         do_test_multi_post_event_actions(false);
10879 }
10880
10881 #[test]
10882 fn test_batch_channel_open() {
10883         let chanmon_cfgs = create_chanmon_cfgs(3);
10884         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10885         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
10886         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10887
10888         // Initiate channel opening and create the batch channel funding transaction.
10889         let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
10890                 (&nodes[1], 100_000, 0, 42, None),
10891                 (&nodes[2], 200_000, 0, 43, None),
10892         ]);
10893
10894         // Go through the funding_created and funding_signed flow with node 1.
10895         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
10896         check_added_monitors(&nodes[1], 1);
10897         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
10898
10899         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
10900         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
10901         check_added_monitors(&nodes[0], 1);
10902
10903         // The transaction should not have been broadcast before all channels are ready.
10904         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
10905
10906         // Go through the funding_created and funding_signed flow with node 2.
10907         nodes[2].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[1]);
10908         check_added_monitors(&nodes[2], 1);
10909         expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id());
10910
10911         let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
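        // Return `InProgress` for the next monitor update so the batch funding broadcast is
        // gated on the monitor persistence completing.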
10912         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
10913         nodes[0].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &funding_signed_msg);
10914         check_added_monitors(&nodes[0], 1);
10915
10916         // The transaction should not have been broadcast before the persistence of all monitors
10917         // has completed.
10918         assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
10919         assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
10920
10921         // Complete the persistence of the monitor.
10922         nodes[0].chain_monitor.complete_sole_pending_chan_update(
10923                 &ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 1 })
10924         );
10925         let events = nodes[0].node.get_and_clear_pending_events();
10926
10927         // The transaction should only have been broadcast now.
10928         let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
10929         assert_eq!(broadcasted_txs.len(), 1);
10930         assert_eq!(broadcasted_txs[0], tx);
10931
10932         assert_eq!(events.len(), 2);
10933         assert!(events.iter().any(|e| matches!(
10934                 *e,
10935                 crate::events::Event::ChannelPending {
10936                         ref counterparty_node_id,
10937                         ..
10938                 } if counterparty_node_id == &nodes[1].node.get_our_node_id(),
10939         )));
10940         assert!(events.iter().any(|e| matches!(
10941                 *e,
10942                 crate::events::Event::ChannelPending {
10943                         ref counterparty_node_id,
10944                         ..
10945                 } if counterparty_node_id == &nodes[2].node.get_our_node_id(),
10946         )));
10947 }
10948
10949 #[test]
10950 fn test_close_in_funding_batch() {
10951         // This test ensures that if one of the channels in the batch closes, the entire
10952         // batch will close.
10953         let chanmon_cfgs = create_chanmon_cfgs(3);
10954         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10955         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
10956         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10957
10958         // Initiate channel opening and create the batch channel funding transaction.
10959         let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
10960                 (&nodes[1], 100_000, 0, 42, None),
10961                 (&nodes[2], 200_000, 0, 43, None),
10962         ]);
10963
10964         // Go through the funding_created and funding_signed flow with node 1.
10965         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
10966         check_added_monitors(&nodes[1], 1);
10967         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
10968
10969         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
10970         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
10971         check_added_monitors(&nodes[0], 1);
10972
10973         // The transaction should not have been broadcast before all channels are ready.
10974         assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
10975
10976         // Force-close the channel for which we've completed the initial monitor.
10977         let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
10978         let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
10979         let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
10980         let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
10981         let error_message = "Channel force-closed";
10982         nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
10983
10984         // The channel's monitor should have received a closing update.
10985         check_added_monitors(&nodes[0], 1);
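        // The only update the monitor should have received is the closing one;
        // CLOSED_CHANNEL_UPDATE_ID is the sentinel update_id applied once a channel is closed.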
10986         {
10987                 let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
10988                 let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
10989                 assert_eq!(monitor_updates_1.len(), 1);
10990                 assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
10991         }
10992
10993         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
10994         match msg_events[0] {
10995                 MessageSendEvent::HandleError { .. } => (),
10996                 _ => panic!("Unexpected message."),
10997         }
10998
10999         // We broadcast the commitment transaction as part of the force-close.
11000         {
11001                 let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
11002                 assert_eq!(broadcasted_txs.len(), 1);
11003                 assert!(broadcasted_txs[0].txid() != tx.txid());
11004                 assert_eq!(broadcasted_txs[0].input.len(), 1);
11005                 assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
11006         }
11007
11008         // All channels in the batch should close immediately.
11009         check_closed_events(&nodes[0], &[
11010                 ExpectedCloseEvent {
11011                         channel_id: Some(channel_id_1),
11012                         discard_funding: true,
11013                         channel_funding_txo: Some(funding_txo_1),
11014                         user_channel_id: Some(42),
11015                         ..Default::default()
11016                 },
11017                 ExpectedCloseEvent {
11018                         channel_id: Some(channel_id_2),
11019                         discard_funding: true,
11020                         channel_funding_txo: Some(funding_txo_2),
11021                         user_channel_id: Some(43),
11022                         ..Default::default()
11023                 },
11024         ]);
11025
11026         // Ensure the channels don't exist anymore.
11027         assert!(nodes[0].node.list_channels().is_empty());
11028 }
11029
11030 #[test]
11031 fn test_batch_funding_close_after_funding_signed() {
11032         let chanmon_cfgs = create_chanmon_cfgs(3);
11033         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
11034         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
11035         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
11036
11037         // Initiate channel opening and create the batch channel funding transaction.
11038         let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
11039                 (&nodes[1], 100_000, 0, 42, None),
11040                 (&nodes[2], 200_000, 0, 43, None),
11041         ]);
11042
11043         // Go through the funding_created and funding_signed flow with node 1.
11044         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
11045         check_added_monitors(&nodes[1], 1);
11046         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
11047
11048         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
11049         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
11050         check_added_monitors(&nodes[0], 1);
11051
11052         // Go through the funding_created and funding_signed flow with node 2.
11053         nodes[2].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[1]);
11054         check_added_monitors(&nodes[2], 1);
11055         expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id());
11056
11057         let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
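        // Hold the final monitor persistence as in-progress; the batch is force-closed before
        // it completes.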
11058         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
11059         nodes[0].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &funding_signed_msg);
11060         check_added_monitors(&nodes[0], 1);
11061
11062         // The transaction should not have been broadcast before all channels are ready.
11063         assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
11064
11065         // Force-close the channel for which we've completed the initial monitor.
11066         let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
11067         let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
11068         let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
11069         let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
11070         let error_message = "Channel force-closed";
11071         nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
11072         check_added_monitors(&nodes[0], 2);
11073         {
11074                 let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
11075                 let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
11076                 assert_eq!(monitor_updates_1.len(), 1);
11077                 assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
11078                 let monitor_updates_2 = monitor_updates.get(&channel_id_2).unwrap();
11079                 assert_eq!(monitor_updates_2.len(), 1);
11080                 assert_eq!(monitor_updates_2[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
11081         }
11082         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
11083         match msg_events[0] {
11084                 MessageSendEvent::HandleError { .. } => (),
11085                 _ => panic!("Unexpected message."),
11086         }
11087
11088         // We broadcast the commitment transaction as part of the force-close.
11089         {
11090                 let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
11091                 assert_eq!(broadcasted_txs.len(), 1);
11092                 assert!(broadcasted_txs[0].txid() != tx.txid());
11093                 assert_eq!(broadcasted_txs[0].input.len(), 1);
11094                 assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
11095         }
11096
11097         // All channels in the batch should close immediately.
11098         check_closed_events(&nodes[0], &[
11099                 ExpectedCloseEvent {
11100                         channel_id: Some(channel_id_1),
11101                         discard_funding: true,
11102                         channel_funding_txo: Some(funding_txo_1),
11103                         user_channel_id: Some(42),
11104                         ..Default::default()
11105                 },
11106                 ExpectedCloseEvent {
11107                         channel_id: Some(channel_id_2),
11108                         discard_funding: true,
11109                         channel_funding_txo: Some(funding_txo_2),
11110                         user_channel_id: Some(43),
11111                         ..Default::default()
11112                 },
11113         ]);
11114
11115         // Ensure the channels don't exist anymore.
11116         assert!(nodes[0].node.list_channels().is_empty());
11117 }
11118
11119 fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitment: bool) {
11120         // Tests that a node will forget the channel (when it only requires 1 confirmation) if the
11121         // funding and commitment transactions confirm in the same block.
11122         let chanmon_cfgs = create_chanmon_cfgs(2);
11123         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
11124         let mut min_depth_1_block_cfg = test_default_channel_config();
11125         min_depth_1_block_cfg.channel_handshake_config.minimum_depth = 1;
11126         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(min_depth_1_block_cfg), Some(min_depth_1_block_cfg)]);
11127         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
11128
11129         let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
11130         let chan_id = ChannelId::v1_from_funding_outpoint(chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 });
11131
11132         assert_eq!(nodes[0].node.list_channels().len(), 1);
11133         assert_eq!(nodes[1].node.list_channels().len(), 1);
11134
11135         let (closing_node, other_node) = if confirm_remote_commitment {
11136                 (&nodes[1], &nodes[0])
11137         } else {
11138                 (&nodes[0], &nodes[1])
11139         };
11140         let error_message = "Channel force-closed";
11141         closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node.node.get_our_node_id(), error_message.to_string()).unwrap();
11142         let mut msg_events = closing_node.node.get_and_clear_pending_msg_events();
11143         assert_eq!(msg_events.len(), 1);
11144         match msg_events.pop().unwrap() {
11145                 MessageSendEvent::HandleError { action: msgs::ErrorAction::SendErrorMessage { .. }, .. } => {},
11146                 _ => panic!("Unexpected event"),
11147         }
11148         check_added_monitors(closing_node, 1);
11149         check_closed_event(closing_node, 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[other_node.node.get_our_node_id()], 1_000_000);
11150
11151         let commitment_tx = {
11152                 let mut txn = closing_node.tx_broadcaster.txn_broadcast();
11153                 assert_eq!(txn.len(), 1);
11154                 let commitment_tx = txn.pop().unwrap();
11155                 check_spends!(commitment_tx, funding_tx);
11156                 commitment_tx
11157         };
11158
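        // Confirm the funding and commitment transactions in the same block on both nodes.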
11159         mine_transactions(&nodes[0], &[&funding_tx, &commitment_tx]);
11160         mine_transactions(&nodes[1], &[&funding_tx, &commitment_tx]);
11161
11162         check_closed_broadcast(other_node, 1, true);
11163         check_added_monitors(other_node, 1);
11164         check_closed_event(other_node, 1, ClosureReason::CommitmentTxConfirmed, false, &[closing_node.node.get_our_node_id()], 1_000_000);
11165
11166         assert!(nodes[0].node.list_channels().is_empty());
11167         assert!(nodes[1].node.list_channels().is_empty());
11168 }
11169
11170 #[test]
11171 fn test_funding_and_commitment_tx_confirm_same_block() {
11172         do_test_funding_and_commitment_tx_confirm_same_block(false);
11173         do_test_funding_and_commitment_tx_confirm_same_block(true);
11174 }
11175
11176 #[test]
11177 fn test_accept_inbound_channel_errors_queued() {
11178         // For manually accepted inbound channels, tests that a close error is correctly handled
11179         // and the channel fails for the initiator.
11180         let mut config0 = test_default_channel_config();
11181         let mut config1 = config0.clone();
11182         config1.channel_handshake_limits.their_to_self_delay = 1000;
11183         config1.manually_accept_inbound_channels = true;
11184         config0.channel_handshake_config.our_to_self_delay = 2000;
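        // nodes[0] requests a to_self_delay of 2000, exceeding nodes[1]'s limit of 1000, so
        // nodes[1]'s attempt to accept the channel below is expected to fail.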
11185
11186         let chanmon_cfgs = create_chanmon_cfgs(2);
11187         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
11188         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config0), Some(config1)]);
11189         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
11190
11191         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
11192         let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
11193
11194         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
11195         let events = nodes[1].node.get_and_clear_pending_events();
11196         match events[0] {
11197                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
11198                         match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23) {
11199                                 Err(APIError::ChannelUnavailable { err: _ }) => (),
11200                                 _ => panic!(),
11201                         }
11202                 }
11203                 _ => panic!("Unexpected event"),
11204         }
11205         assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
11206                 open_channel_msg.common_fields.temporary_channel_id);
11207 }