]> git.bitcoin.ninja Git - rust-lightning/blob - lightning/src/ln/functional_tests.rs
Merge pull request #2248 from TheBlueMatt/2023-04-gossip-check
[rust-lightning] / lightning / src / ln / functional_tests.rs
1 // This file is Copyright its original authors, visible in version control
2 // history.
3 //
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
8 // licenses.
9
10 //! Tests that test standing up a network of ChannelManagers, creating channels, sending
11 //! payments/messages between them, and often checking the resulting ChannelMonitors are able to
12 //! claim outputs on-chain.
13
14 use crate::chain;
15 use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
16 use crate::chain::chaininterface::LowerBoundedFeeEstimator;
17 use crate::chain::channelmonitor;
18 use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
19 use crate::chain::transaction::OutPoint;
20 use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource, SignerProvider};
21 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
22 use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash};
23 use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel};
24 use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
25 use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
26 use crate::ln::{chan_utils, onion_utils};
27 use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
28 use crate::routing::gossip::{NetworkGraph, NetworkUpdate};
29 use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route};
30 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
31 use crate::ln::msgs;
32 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
33 use crate::util::enforcing_trait_impls::EnforcingSigner;
34 use crate::util::test_utils::{self, WatchtowerPersister};
35 use crate::util::errors::APIError;
36 use crate::util::ser::{Writeable, ReadableArgs};
37 use crate::util::string::UntrustedString;
38 use crate::util::config::{UserConfig, MaxDustHTLCExposure};
39
40 use bitcoin::hash_types::BlockHash;
41 use bitcoin::blockdata::script::{Builder, Script};
42 use bitcoin::blockdata::opcodes;
43 use bitcoin::blockdata::constants::genesis_block;
44 use bitcoin::network::constants::Network;
45 use bitcoin::{PackedLockTime, Sequence, Transaction, TxIn, TxOut, Witness};
46 use bitcoin::OutPoint as BitcoinOutPoint;
47
48 use bitcoin::secp256k1::Secp256k1;
49 use bitcoin::secp256k1::{PublicKey,SecretKey};
50
51 use regex;
52
53 use crate::io;
54 use crate::prelude::*;
55 use alloc::collections::BTreeSet;
56 use core::default::Default;
57 use core::iter::repeat;
58 use bitcoin::hashes::Hash;
59 use crate::sync::{Arc, Mutex};
60
61 use crate::ln::functional_test_utils::*;
62 use crate::ln::chan_utils::CommitmentTransaction;
63
64 use super::channel::UNFUNDED_CHANNEL_AGE_LIMIT_TICKS;
65
66 #[test]
67 fn test_insane_channel_opens() {
68         // Stand up a network of 2 nodes
69         use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS;
70         let mut cfg = UserConfig::default();
71         cfg.channel_handshake_limits.max_funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1;
72         let chanmon_cfgs = create_chanmon_cfgs(2);
73         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
74         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(cfg)]);
75         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
76
77         // Instantiate channel parameters where we push the maximum msats given our
78         // funding satoshis
79         let channel_value_sat = 31337; // same as funding satoshis
80         let channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg);
81         let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
82
83         // Have node0 initiate a channel to node1 with aforementioned parameters
84         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_sat, push_msat, 42, None).unwrap();
85
86         // Extract the channel open message from node0 to node1
87         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
88
89         // Test helper that asserts we get the correct error string given a mutator
90         // that supposedly makes the channel open message insane
91         let insane_open_helper = |expected_error_str: &str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
92                 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &message_mutator(open_channel_message.clone()));
93                 let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
94                 assert_eq!(msg_events.len(), 1);
95                 let expected_regex = regex::Regex::new(expected_error_str).unwrap();
96                 if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
97                         match action {
98                                 &ErrorAction::SendErrorMessage { .. } => {
99                                         nodes[1].logger.assert_log_regex("lightning::ln::channelmanager", expected_regex, 1);
100                                 },
101                                 _ => panic!("unexpected event!"),
102                         }
103                 } else { assert!(false); }
104         };
105
106         use crate::ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;
107
108         // Test all mutations that would make the channel open message insane
109         insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
110         insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });
111
112         insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.funding_satoshis + 1; msg });
113
114         insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
115
116         insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1 ; msg });
117
118         insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });
119
120         insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });
121
122         insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.max_accepted_htlcs = 0; msg });
123
124         insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.max_accepted_htlcs = 484; msg });
125 }
126
127 #[test]
128 fn test_funding_exceeds_no_wumbo_limit() {
129         // Test that if a peer does not support wumbo channels, we'll refuse to open a wumbo channel to
130         // them.
131         use crate::ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO;
132         let chanmon_cfgs = create_chanmon_cfgs(2);
133         let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
134         *node_cfgs[1].override_init_features.borrow_mut() = Some(channelmanager::provided_init_features(&test_default_channel_config()).clear_wumbo());
135         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
136         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
137
138         match nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None) {
139                 Err(APIError::APIMisuseError { err }) => {
140                         assert_eq!(format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1), err);
141                 },
142                 _ => panic!()
143         }
144 }
145
146 fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
147         // A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure,
148         // but only for them. Because some LSPs do it with some level of trust of the clients (for a
149         // substantial UX improvement), we explicitly allow it. Because it's unlikely to happen often
150         // in normal testing, we test it explicitly here.
151         let chanmon_cfgs = create_chanmon_cfgs(2);
152         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
153         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
154         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
155         let default_config = UserConfig::default();
156
157         // Have node0 initiate a channel to node1 with aforementioned parameters
158         let mut push_amt = 100_000_000;
159         let feerate_per_kw = 253;
160         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
161         push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(&channel_type_features) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
162         push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
163
164         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None).unwrap();
165         let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
166         if !send_from_initiator {
167                 open_channel_message.channel_reserve_satoshis = 0;
168                 open_channel_message.max_htlc_value_in_flight_msat = 100_000_000;
169         }
170         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
171
172         // Extract the channel accept message from node1 to node0
173         let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
174         if send_from_initiator {
175                 accept_channel_message.channel_reserve_satoshis = 0;
176                 accept_channel_message.max_htlc_value_in_flight_msat = 100_000_000;
177         }
178         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
179         {
180                 let sender_node = if send_from_initiator { &nodes[1] } else { &nodes[0] };
181                 let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
182                 let mut sender_node_per_peer_lock;
183                 let mut sender_node_peer_state_lock;
184                 if send_from_initiator {
185                         let chan = get_inbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
186                         chan.context.holder_selected_channel_reserve_satoshis = 0;
187                         chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
188                 } else {
189                         let chan = get_outbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
190                         chan.context.holder_selected_channel_reserve_satoshis = 0;
191                         chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
192                 }
193         }
194
195         let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id);
196         let funding_msgs = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx);
197         create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_msgs.0);
198
199         // nodes[0] should now be able to send the full balance to nodes[1], violating nodes[1]'s
200         // security model if it ever tries to send funds back to nodes[0] (but that's not our problem).
201         if send_from_initiator {
202                 send_payment(&nodes[0], &[&nodes[1]], 100_000_000
203                         // Note that for outbound channels we have to consider the commitment tx fee and the
204                         // "fee spike buffer", which is currently a multiple of the total commitment tx fee as
205                         // well as an additional HTLC.
206                         - FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, &channel_type_features));
207         } else {
208                 send_payment(&nodes[1], &[&nodes[0]], push_amt);
209         }
210 }
211
212 #[test]
213 fn test_counterparty_no_reserve() {
214         do_test_counterparty_no_reserve(true);
215         do_test_counterparty_no_reserve(false);
216 }
217
218 #[test]
219 fn test_async_inbound_update_fee() {
220         let chanmon_cfgs = create_chanmon_cfgs(2);
221         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
222         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
223         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
224         create_announced_chan_between_nodes(&nodes, 0, 1);
225
226         // balancing
227         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
228
229         // A                                        B
230         // update_fee                            ->
231         // send (1) commitment_signed            -.
232         //                                       <- update_add_htlc/commitment_signed
233         // send (2) RAA (awaiting remote revoke) -.
234         // (1) commitment_signed is delivered    ->
235         //                                       .- send (3) RAA (awaiting remote revoke)
236         // (2) RAA is delivered                  ->
237         //                                       .- send (4) commitment_signed
238         //                                       <- (3) RAA is delivered
239         // send (5) commitment_signed            -.
240         //                                       <- (4) commitment_signed is delivered
241         // send (6) RAA                          -.
242         // (5) commitment_signed is delivered    ->
243         //                                       <- RAA
244         // (6) RAA is delivered                  ->
245
246         // First nodes[0] generates an update_fee
247         {
248                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
249                 *feerate_lock += 20;
250         }
251         nodes[0].node.timer_tick_occurred();
252         check_added_monitors!(nodes[0], 1);
253
254         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
255         assert_eq!(events_0.len(), 1);
256         let (update_msg, commitment_signed) = match events_0[0] { // (1)
257                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
258                         (update_fee.as_ref(), commitment_signed)
259                 },
260                 _ => panic!("Unexpected event"),
261         };
262
263         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
264
265         // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
266         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000);
267         nodes[1].node.send_payment_with_route(&route, our_payment_hash,
268                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
269         check_added_monitors!(nodes[1], 1);
270
271         let payment_event = {
272                 let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
273                 assert_eq!(events_1.len(), 1);
274                 SendEvent::from_event(events_1.remove(0))
275         };
276         assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
277         assert_eq!(payment_event.msgs.len(), 1);
278
279         // ...now when the messages get delivered everyone should be happy
280         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
281         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
282         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
283         // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
284         check_added_monitors!(nodes[0], 1);
285
286         // deliver(1), generate (3):
287         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
288         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
289         // nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes
290         check_added_monitors!(nodes[1], 1);
291
292         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack); // deliver (2)
293         let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
294         assert!(bs_update.update_add_htlcs.is_empty()); // (4)
295         assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4)
296         assert!(bs_update.update_fail_htlcs.is_empty()); // (4)
297         assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4)
298         assert!(bs_update.update_fee.is_none()); // (4)
299         check_added_monitors!(nodes[1], 1);
300
301         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); // deliver (3)
302         let as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
303         assert!(as_update.update_add_htlcs.is_empty()); // (5)
304         assert!(as_update.update_fulfill_htlcs.is_empty()); // (5)
305         assert!(as_update.update_fail_htlcs.is_empty()); // (5)
306         assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5)
307         assert!(as_update.update_fee.is_none()); // (5)
308         check_added_monitors!(nodes[0], 1);
309
310         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed); // deliver (4)
311         let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
312         // only (6) so get_event_msg's assert(len == 1) passes
313         check_added_monitors!(nodes[0], 1);
314
315         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.commitment_signed); // deliver (5)
316         let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
317         check_added_monitors!(nodes[1], 1);
318
319         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
320         check_added_monitors!(nodes[0], 1);
321
322         let events_2 = nodes[0].node.get_and_clear_pending_events();
323         assert_eq!(events_2.len(), 1);
324         match events_2[0] {
325                 Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment
326                 _ => panic!("Unexpected event"),
327         }
328
329         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke); // deliver (6)
330         check_added_monitors!(nodes[1], 1);
331 }
332
333 #[test]
334 fn test_update_fee_unordered_raa() {
335         // Just the intro to the previous test followed by an out-of-order RAA (which caused a
336         // crash in an earlier version of the update_fee patch)
337         let chanmon_cfgs = create_chanmon_cfgs(2);
338         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
339         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
340         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
341         create_announced_chan_between_nodes(&nodes, 0, 1);
342
343         // balancing
344         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
345
346         // First nodes[0] generates an update_fee
347         {
348                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
349                 *feerate_lock += 20;
350         }
351         nodes[0].node.timer_tick_occurred();
352         check_added_monitors!(nodes[0], 1);
353
354         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
355         assert_eq!(events_0.len(), 1);
356         let update_msg = match events_0[0] { // (1)
357                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
358                         update_fee.as_ref()
359                 },
360                 _ => panic!("Unexpected event"),
361         };
362
363         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
364
365         // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
366         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000);
367         nodes[1].node.send_payment_with_route(&route, our_payment_hash,
368                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
369         check_added_monitors!(nodes[1], 1);
370
371         let payment_event = {
372                 let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
373                 assert_eq!(events_1.len(), 1);
374                 SendEvent::from_event(events_1.remove(0))
375         };
376         assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
377         assert_eq!(payment_event.msgs.len(), 1);
378
379         // ...now when the messages get delivered everyone should be happy
380         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
381         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
382         let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
383         // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
384         check_added_monitors!(nodes[0], 1);
385
386         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg); // deliver (2)
387         check_added_monitors!(nodes[1], 1);
388
389         // We can't continue, sadly, because our (1) now has a bogus signature
390 }
391
392 #[test]
393 fn test_multi_flight_update_fee() {
394         let chanmon_cfgs = create_chanmon_cfgs(2);
395         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
396         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
397         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
398         create_announced_chan_between_nodes(&nodes, 0, 1);
399
400         // A                                        B
401         // update_fee/commitment_signed          ->
402         //                                       .- send (1) RAA and (2) commitment_signed
403         // update_fee (never committed)          ->
404         // (3) update_fee                        ->
405         // We have to manually generate the above update_fee, it is allowed by the protocol but we
406         // don't track which updates correspond to which revoke_and_ack responses so we're in
407         // AwaitingRAA mode and will not generate the update_fee yet.
408         //                                       <- (1) RAA delivered
409         // (3) is generated and send (4) CS      -.
410         // Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't
411         // know the per_commitment_point to use for it.
412         //                                       <- (2) commitment_signed delivered
413         // revoke_and_ack                        ->
414         //                                          B should send no response here
415         // (4) commitment_signed delivered       ->
416         //                                       <- RAA/commitment_signed delivered
417         // revoke_and_ack                        ->
418
419         // First nodes[0] generates an update_fee
420         let initial_feerate;
421         {
422                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
423                 initial_feerate = *feerate_lock;
424                 *feerate_lock = initial_feerate + 20;
425         }
426         nodes[0].node.timer_tick_occurred();
427         check_added_monitors!(nodes[0], 1);
428
429         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
430         assert_eq!(events_0.len(), 1);
431         let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1)
432                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
433                         (update_fee.as_ref().unwrap(), commitment_signed)
434                 },
435                 _ => panic!("Unexpected event"),
436         };
437
438         // Deliver first update_fee/commitment_signed pair, generating (1) and (2):
439         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1);
440         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1);
441         let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
442         check_added_monitors!(nodes[1], 1);
443
444         // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment
445         // transaction:
446         {
447                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
448                 *feerate_lock = initial_feerate + 40;
449         }
450         nodes[0].node.timer_tick_occurred();
451         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
452         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
453
454         // Create the (3) update_fee message that nodes[0] will generate before it does...
455         let mut update_msg_2 = msgs::UpdateFee {
456                 channel_id: update_msg_1.channel_id.clone(),
457                 feerate_per_kw: (initial_feerate + 30) as u32,
458         };
459
460         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);
461
462         update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32;
463         // Deliver (3)
464         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);
465
466         // Deliver (1), generating (3) and (4)
467         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg);
468         let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
469         check_added_monitors!(nodes[0], 1);
470         assert!(as_second_update.update_add_htlcs.is_empty());
471         assert!(as_second_update.update_fulfill_htlcs.is_empty());
472         assert!(as_second_update.update_fail_htlcs.is_empty());
473         assert!(as_second_update.update_fail_malformed_htlcs.is_empty());
474         // Check that the update_fee newly generated matches what we delivered:
475         assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id);
476         assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw);
477
478         // Deliver (2) commitment_signed
479         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
480         let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
481         check_added_monitors!(nodes[0], 1);
482         // No commitment_signed so get_event_msg's assert(len == 1) passes
483
484         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg);
485         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
486         check_added_monitors!(nodes[1], 1);
487
488         // Delever (4)
489         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
490         let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
491         check_added_monitors!(nodes[1], 1);
492
493         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
494         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
495         check_added_monitors!(nodes[0], 1);
496
497         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment);
498         let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
499         // No commitment_signed so get_event_msg's assert(len == 1) passes
500         check_added_monitors!(nodes[0], 1);
501
502         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke);
503         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
504         check_added_monitors!(nodes[1], 1);
505 }
506
507 fn do_test_sanity_on_in_flight_opens(steps: u8) {
508         // Previously, we had issues deserializing channels when we hadn't connected the first block
509         // after creation. To catch that and similar issues, we lean on the Node::drop impl to test
510         // serialization round-trips and simply do steps towards opening a channel and then drop the
511         // Node objects.
512
513         let chanmon_cfgs = create_chanmon_cfgs(2);
514         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
515         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
516         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
517
518         if steps & 0b1000_0000 != 0{
519                 let block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
520                 connect_block(&nodes[0], &block);
521                 connect_block(&nodes[1], &block);
522         }
523
524         if steps & 0x0f == 0 { return; }
525         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
526         let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
527
528         if steps & 0x0f == 1 { return; }
529         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
530         let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
531
532         if steps & 0x0f == 2 { return; }
533         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
534
535         let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
536
537         if steps & 0x0f == 3 { return; }
538         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
539         check_added_monitors!(nodes[0], 0);
540         let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
541
542         if steps & 0x0f == 4 { return; }
543         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
544         {
545                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
546                 assert_eq!(added_monitors.len(), 1);
547                 assert_eq!(added_monitors[0].0, funding_output);
548                 added_monitors.clear();
549         }
550         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
551
552         let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
553
554         if steps & 0x0f == 5 { return; }
555         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
556         {
557                 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
558                 assert_eq!(added_monitors.len(), 1);
559                 assert_eq!(added_monitors[0].0, funding_output);
560                 added_monitors.clear();
561         }
562
563         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
564         let events_4 = nodes[0].node.get_and_clear_pending_events();
565         assert_eq!(events_4.len(), 0);
566
567         if steps & 0x0f == 6 { return; }
568         create_chan_between_nodes_with_value_confirm_first(&nodes[0], &nodes[1], &tx, 2);
569
570         if steps & 0x0f == 7 { return; }
571         confirm_transaction_at(&nodes[0], &tx, 2);
572         connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
573         create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
574         expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
575 }
576
577 #[test]
578 fn test_sanity_on_in_flight_opens() {
579         do_test_sanity_on_in_flight_opens(0);
580         do_test_sanity_on_in_flight_opens(0 | 0b1000_0000);
581         do_test_sanity_on_in_flight_opens(1);
582         do_test_sanity_on_in_flight_opens(1 | 0b1000_0000);
583         do_test_sanity_on_in_flight_opens(2);
584         do_test_sanity_on_in_flight_opens(2 | 0b1000_0000);
585         do_test_sanity_on_in_flight_opens(3);
586         do_test_sanity_on_in_flight_opens(3 | 0b1000_0000);
587         do_test_sanity_on_in_flight_opens(4);
588         do_test_sanity_on_in_flight_opens(4 | 0b1000_0000);
589         do_test_sanity_on_in_flight_opens(5);
590         do_test_sanity_on_in_flight_opens(5 | 0b1000_0000);
591         do_test_sanity_on_in_flight_opens(6);
592         do_test_sanity_on_in_flight_opens(6 | 0b1000_0000);
593         do_test_sanity_on_in_flight_opens(7);
594         do_test_sanity_on_in_flight_opens(7 | 0b1000_0000);
595         do_test_sanity_on_in_flight_opens(8);
596         do_test_sanity_on_in_flight_opens(8 | 0b1000_0000);
597 }
598
599 #[test]
600 fn test_update_fee_vanilla() {
601         let chanmon_cfgs = create_chanmon_cfgs(2);
602         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
603         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
604         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
605         create_announced_chan_between_nodes(&nodes, 0, 1);
606
607         {
608                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
609                 *feerate_lock += 25;
610         }
611         nodes[0].node.timer_tick_occurred();
612         check_added_monitors!(nodes[0], 1);
613
614         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
615         assert_eq!(events_0.len(), 1);
616         let (update_msg, commitment_signed) = match events_0[0] {
617                         MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
618                         (update_fee.as_ref(), commitment_signed)
619                 },
620                 _ => panic!("Unexpected event"),
621         };
622         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
623
624         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
625         let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
626         check_added_monitors!(nodes[1], 1);
627
628         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
629         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
630         check_added_monitors!(nodes[0], 1);
631
632         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
633         let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
634         // No commitment_signed so get_event_msg's assert(len == 1) passes
635         check_added_monitors!(nodes[0], 1);
636
637         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
638         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
639         check_added_monitors!(nodes[1], 1);
640 }
641
642 #[test]
643 fn test_update_fee_that_funder_cannot_afford() {
644         let chanmon_cfgs = create_chanmon_cfgs(2);
645         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
646         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
647         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
648         let channel_value = 5000;
649         let push_sats = 700;
650         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000);
651         let channel_id = chan.2;
652         let secp_ctx = Secp256k1::new();
653         let default_config = UserConfig::default();
654         let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);
655
656         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
657
658         // Calculate the maximum feerate that A can afford. Note that we don't send an update_fee
659         // CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLCs before actually running out of local balance, so we
660         // calculate two different feerates here - the expected local limit as well as the expected
661         // remote limit.
662         let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(&channel_type_features) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32;
663         let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(&channel_type_features)) as u32;
664         {
665                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
666                 *feerate_lock = feerate;
667         }
668         nodes[0].node.timer_tick_occurred();
669         check_added_monitors!(nodes[0], 1);
670         let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
671
672         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg.update_fee.unwrap());
673
674         commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false);
675
676         // Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate set above.
677         {
678                 let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone();
679
680                 //We made sure neither party's funds are below the dust limit and there are no HTLCs here
681                 assert_eq!(commitment_tx.output.len(), 2);
682                 let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000;
683                 let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value);
684                 actual_fee = channel_value - actual_fee;
685                 assert_eq!(total_fee, actual_fee);
686         }
687
688         {
689                 // Increment the feerate by a small constant, accounting for rounding errors
690                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
691                 *feerate_lock += 4;
692         }
693         nodes[0].node.timer_tick_occurred();
694         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
695         check_added_monitors!(nodes[0], 0);
696
697         const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;
698
699         // Get the EnforcingSigner for each channel, which will be used to (1) get the keys
700         // needed to sign the new commitment tx and (2) sign the new commitment tx.
701         let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = {
702                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
703                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
704                 let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
705                 let chan_signer = local_chan.get_signer();
706                 let pubkeys = chan_signer.as_ref().pubkeys();
707                 (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
708                  pubkeys.funding_pubkey)
709         };
710         let (remote_delayed_payment_basepoint, remote_htlc_basepoint,remote_point, remote_funding) = {
711                 let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
712                 let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
713                 let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
714                 let chan_signer = remote_chan.get_signer();
715                 let pubkeys = chan_signer.as_ref().pubkeys();
716                 (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
717                  chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
718                  pubkeys.funding_pubkey)
719         };
720
721         // Assemble the set of keys we can use for signatures for our commitment_signed message.
722         let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
723                 &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);
724
725         let res = {
726                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
727                 let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
728                 let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap();
729                 let local_chan_signer = local_chan.get_signer();
730                 let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![];
731                 let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
732                         INITIAL_COMMITMENT_NUMBER - 1,
733                         push_sats,
734                         channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000,
735                         local_funding, remote_funding,
736                         commit_tx_keys.clone(),
737                         non_buffer_feerate + 4,
738                         &mut htlcs,
739                         &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
740                 );
741                 local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
742         };
743
744         let commit_signed_msg = msgs::CommitmentSigned {
745                 channel_id: chan.2,
746                 signature: res.0,
747                 htlc_signatures: res.1,
748                 #[cfg(taproot)]
749                 partial_signature_with_nonce: None,
750         };
751
752         let update_fee = msgs::UpdateFee {
753                 channel_id: chan.2,
754                 feerate_per_kw: non_buffer_feerate + 4,
755         };
756
757         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_fee);
758
759         //While producing the commitment_signed response after handling a received update_fee request the
760         //check to see if the funder, who sent the update_fee request, can afford the new fee (funder_balance >= fee+channel_reserve)
761         //Should produce and error.
762         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
763         nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
764         check_added_monitors!(nodes[1], 1);
765         check_closed_broadcast!(nodes[1], true);
766         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") },
767                 [nodes[0].node.get_our_node_id()], channel_value);
768 }
769
770 #[test]
771 fn test_update_fee_with_fundee_update_add_htlc() {
772         let chanmon_cfgs = create_chanmon_cfgs(2);
773         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
774         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
775         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
776         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
777
778         // balancing
779         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
780
781         {
782                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
783                 *feerate_lock += 20;
784         }
785         nodes[0].node.timer_tick_occurred();
786         check_added_monitors!(nodes[0], 1);
787
788         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
789         assert_eq!(events_0.len(), 1);
790         let (update_msg, commitment_signed) = match events_0[0] {
791                         MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
792                         (update_fee.as_ref(), commitment_signed)
793                 },
794                 _ => panic!("Unexpected event"),
795         };
796         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
797         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
798         let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
799         check_added_monitors!(nodes[1], 1);
800
801         let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 800000);
802
803         // nothing happens since node[1] is in AwaitingRemoteRevoke
804         nodes[1].node.send_payment_with_route(&route, our_payment_hash,
805                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
806         {
807                 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
808                 assert_eq!(added_monitors.len(), 0);
809                 added_monitors.clear();
810         }
811         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
812         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
813         // node[1] has nothing to do
814
815         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
816         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
817         check_added_monitors!(nodes[0], 1);
818
819         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
820         let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
821         // No commitment_signed so get_event_msg's assert(len == 1) passes
822         check_added_monitors!(nodes[0], 1);
823         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
824         check_added_monitors!(nodes[1], 1);
825         // AwaitingRemoteRevoke ends here
826
827         let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
828         assert_eq!(commitment_update.update_add_htlcs.len(), 1);
829         assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0);
830         assert_eq!(commitment_update.update_fail_htlcs.len(), 0);
831         assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0);
832         assert_eq!(commitment_update.update_fee.is_none(), true);
833
834         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]);
835         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
836         check_added_monitors!(nodes[0], 1);
837         let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
838
839         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke);
840         check_added_monitors!(nodes[1], 1);
841         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
842
843         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
844         check_added_monitors!(nodes[1], 1);
845         let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
846         // No commitment_signed so get_event_msg's assert(len == 1) passes
847
848         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke);
849         check_added_monitors!(nodes[0], 1);
850         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
851
852         expect_pending_htlcs_forwardable!(nodes[0]);
853
854         let events = nodes[0].node.get_and_clear_pending_events();
855         assert_eq!(events.len(), 1);
856         match events[0] {
857                 Event::PaymentClaimable { .. } => { },
858                 _ => panic!("Unexpected event"),
859         };
860
861         claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage);
862
863         send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
864         send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
865         close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
866         check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
867         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
868 }
869
870 #[test]
871 fn test_update_fee() {
872         let chanmon_cfgs = create_chanmon_cfgs(2);
873         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
874         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
875         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
876         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
877         let channel_id = chan.2;
878
879         // A                                        B
880         // (1) update_fee/commitment_signed      ->
881         //                                       <- (2) revoke_and_ack
882         //                                       .- send (3) commitment_signed
883         // (4) update_fee/commitment_signed      ->
884         //                                       .- send (5) revoke_and_ack (no CS as we're awaiting a revoke)
885         //                                       <- (3) commitment_signed delivered
886         // send (6) revoke_and_ack               -.
887         //                                       <- (5) deliver revoke_and_ack
888         // (6) deliver revoke_and_ack            ->
889         //                                       .- send (7) commitment_signed in response to (4)
890         //                                       <- (7) deliver commitment_signed
891         // revoke_and_ack                        ->
892
893         // Create and deliver (1)...
894         let feerate;
895         {
896                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
897                 feerate = *feerate_lock;
898                 *feerate_lock = feerate + 20;
899         }
900         nodes[0].node.timer_tick_occurred();
901         check_added_monitors!(nodes[0], 1);
902
903         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
904         assert_eq!(events_0.len(), 1);
905         let (update_msg, commitment_signed) = match events_0[0] {
906                         MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
907                         (update_fee.as_ref(), commitment_signed)
908                 },
909                 _ => panic!("Unexpected event"),
910         };
911         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
912
913         // Generate (2) and (3):
914         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
915         let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
916         check_added_monitors!(nodes[1], 1);
917
918         // Deliver (2):
919         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
920         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
921         check_added_monitors!(nodes[0], 1);
922
923         // Create and deliver (4)...
924         {
925                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
926                 *feerate_lock = feerate + 30;
927         }
928         nodes[0].node.timer_tick_occurred();
929         check_added_monitors!(nodes[0], 1);
930         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
931         assert_eq!(events_0.len(), 1);
932         let (update_msg, commitment_signed) = match events_0[0] {
933                         MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
934                         (update_fee.as_ref(), commitment_signed)
935                 },
936                 _ => panic!("Unexpected event"),
937         };
938
939         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
940         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
941         check_added_monitors!(nodes[1], 1);
942         // ... creating (5)
943         let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
944         // No commitment_signed so get_event_msg's assert(len == 1) passes
945
946         // Handle (3), creating (6):
947         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0);
948         check_added_monitors!(nodes[0], 1);
949         let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
950         // No commitment_signed so get_event_msg's assert(len == 1) passes
951
952         // Deliver (5):
953         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
954         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
955         check_added_monitors!(nodes[0], 1);
956
957         // Deliver (6), creating (7):
958         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0);
959         let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
960         assert!(commitment_update.update_add_htlcs.is_empty());
961         assert!(commitment_update.update_fulfill_htlcs.is_empty());
962         assert!(commitment_update.update_fail_htlcs.is_empty());
963         assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
964         assert!(commitment_update.update_fee.is_none());
965         check_added_monitors!(nodes[1], 1);
966
967         // Deliver (7)
968         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
969         check_added_monitors!(nodes[0], 1);
970         let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
971         // No commitment_signed so get_event_msg's assert(len == 1) passes
972
973         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
974         check_added_monitors!(nodes[1], 1);
975         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
976
977         assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30);
978         assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30);
979         close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
980         check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
981         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
982 }
983
984 #[test]
985 fn fake_network_test() {
986         // Simple test which builds a network of ChannelManagers, connects them to each other, and
987         // tests that payments get routed and transactions broadcast in semi-reasonable ways.
988         let chanmon_cfgs = create_chanmon_cfgs(4);
989         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
990         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
991         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
992
993         // Create some initial channels
994         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
995         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
996         let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
997
998         // Rebalance the network a bit by relaying one payment through all the channels...
999         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1000         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1001         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1002         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1003
1004         // Send some more payments
1005         send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000);
1006         send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000);
1007         send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000);
1008
1009         // Test failure packets
1010         let payment_hash_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 1000000).1;
1011         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1);
1012
1013         // Add a new channel between nodes 1 and 3, skipping node 2
1014         let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3);
1015
1016         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
1017         send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
1018         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1019         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1020         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1021         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1022         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1023
1024         // Do some rebalance loop payments, simultaneously
1025         let mut hops = Vec::with_capacity(3);
1026         hops.push(RouteHop {
1027                 pubkey: nodes[2].node.get_our_node_id(),
1028                 node_features: NodeFeatures::empty(),
1029                 short_channel_id: chan_2.0.contents.short_channel_id,
1030                 channel_features: ChannelFeatures::empty(),
1031                 fee_msat: 0,
1032                 cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32
1033         });
1034         hops.push(RouteHop {
1035                 pubkey: nodes[3].node.get_our_node_id(),
1036                 node_features: NodeFeatures::empty(),
1037                 short_channel_id: chan_3.0.contents.short_channel_id,
1038                 channel_features: ChannelFeatures::empty(),
1039                 fee_msat: 0,
1040                 cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32
1041         });
1042         hops.push(RouteHop {
1043                 pubkey: nodes[1].node.get_our_node_id(),
1044                 node_features: nodes[1].node.node_features(),
1045                 short_channel_id: chan_4.0.contents.short_channel_id,
1046                 channel_features: nodes[1].node.channel_features(),
1047                 fee_msat: 1000000,
1048                 cltv_expiry_delta: TEST_FINAL_CLTV,
1049         });
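        // Back-fill each hop's fee from the last hop towards the first. The forwarding fee follows
        // the usual BOLT #7 formula: fee_base_msat plus fee_proportional_millionths times the
        // forwarded amount, divided by 1_000_000 (integer division).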
1050         hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
1051         hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
1052         let payment_preimage_1 = send_along_route(&nodes[1], Route { paths: vec![Path { hops, blinded_tail: None }], payment_params: None }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
1053
1054         let mut hops = Vec::with_capacity(3);
1055         hops.push(RouteHop {
1056                 pubkey: nodes[3].node.get_our_node_id(),
1057                 node_features: NodeFeatures::empty(),
1058                 short_channel_id: chan_4.0.contents.short_channel_id,
1059                 channel_features: ChannelFeatures::empty(),
1060                 fee_msat: 0,
1061                 cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32
1062         });
1063         hops.push(RouteHop {
1064                 pubkey: nodes[2].node.get_our_node_id(),
1065                 node_features: NodeFeatures::empty(),
1066                 short_channel_id: chan_3.0.contents.short_channel_id,
1067                 channel_features: ChannelFeatures::empty(),
1068                 fee_msat: 0,
1069                 cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32
1070         });
1071         hops.push(RouteHop {
1072                 pubkey: nodes[1].node.get_our_node_id(),
1073                 node_features: nodes[1].node.node_features(),
1074                 short_channel_id: chan_2.0.contents.short_channel_id,
1075                 channel_features: nodes[1].node.channel_features(),
1076                 fee_msat: 1000000,
1077                 cltv_expiry_delta: TEST_FINAL_CLTV,
1078         });
1079         hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
1080         hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
1081         let payment_hash_2 = send_along_route(&nodes[1], Route { paths: vec![Path { hops, blinded_tail: None }], payment_params: None }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;
1082
1083         // Claim the rebalances...
1084         fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
1085         claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
1086
1087         // Close down the channels...
1088         close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
1089         check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
1090         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
1091         close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
1092         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
1093         check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
1094         close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
1095         check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
1096         check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
1097         close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
1098         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
1099         check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
1100 }
1101
1102 #[test]
1103 fn holding_cell_htlc_counting() {
1104         // Tests that HTLCs in the holding cell count towards the pending HTLC limit on outbound HTLCs,
1105         // ensuring we don't end up with HTLCs sitting around in our holding cell for several
1106         // commitment dance rounds.
1107         let chanmon_cfgs = create_chanmon_cfgs(3);
1108         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1109         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1110         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1111         create_announced_chan_between_nodes(&nodes, 0, 1);
1112         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
1113
1114         // Fetch a route in advance, as we won't be able to fetch one once we can no longer send.
1115         let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
1116
1117         let mut payments = Vec::new();
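        // Send 50 payments: the first lands in the commitment transaction, the rest queue in the
        // holding cell. (50 matches the per-channel outbound HTLC limit, OUR_MAX_HTLCS, referenced
        // in the comment below.)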
1118         for _ in 0..50 {
1119                 let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
1120                 nodes[1].node.send_payment_with_route(&route, payment_hash,
1121                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
1122                 payments.push((payment_preimage, payment_hash));
1123         }
1124         check_added_monitors!(nodes[1], 1);
1125
1126         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
1127         assert_eq!(events.len(), 1);
1128         let initial_payment_event = SendEvent::from_event(events.pop().unwrap());
1129         assert_eq!(initial_payment_event.node_id, nodes[2].node.get_our_node_id());
1130
1131         // There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs in
1132         // the holding cell waiting on B's RAA to send. At this point we should not be able to add
1133         // another HTLC.
1134         {
1135                 unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, payment_hash_1,
1136                                 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
1137                         ), true, APIError::ChannelUnavailable { .. }, {});
1138                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1139         }
1140
1141         // This should also be true if we try to forward a payment.
1142         let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
1143         {
1144                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1145                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1146                 check_added_monitors!(nodes[0], 1);
1147         }
1148
1149         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1150         assert_eq!(events.len(), 1);
1151         let payment_event = SendEvent::from_event(events.pop().unwrap());
1152         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1153
1154         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1155         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
1156         // We have to process pending HTLC forwards twice - the first pass tries to forward the
1157         // payment (and fails), the second processes the resulting failure and fails the HTLC backwards.
1158         expect_pending_htlcs_forwardable!(nodes[1]);
1159         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
1160         check_added_monitors!(nodes[1], 1);
1161
1162         let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1163         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]);
1164         commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true);
1165
1166         expect_payment_failed_with_update!(nodes[0], payment_hash_2, false, chan_2.0.contents.short_channel_id, false);
1167
1168         // Now forward all the pending HTLCs and claim them back
1169         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &initial_payment_event.msgs[0]);
1170         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &initial_payment_event.commitment_msg);
1171         check_added_monitors!(nodes[2], 1);
1172
1173         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1174         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
1175         check_added_monitors!(nodes[1], 1);
1176         let as_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1177
1178         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
1179         check_added_monitors!(nodes[1], 1);
1180         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1181
1182         for ref update in as_updates.update_add_htlcs.iter() {
1183                 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update);
1184         }
1185         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_updates.commitment_signed);
1186         check_added_monitors!(nodes[2], 1);
1187         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
1188         check_added_monitors!(nodes[2], 1);
1189         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1190
1191         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
1192         check_added_monitors!(nodes[1], 1);
1193         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
1194         check_added_monitors!(nodes[1], 1);
1195         let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1196
1197         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_final_raa);
1198         check_added_monitors!(nodes[2], 1);
1199
1200         expect_pending_htlcs_forwardable!(nodes[2]);
1201
1202         let events = nodes[2].node.get_and_clear_pending_events();
1203         assert_eq!(events.len(), payments.len());
1204         for (event, &(_, ref hash)) in events.iter().zip(payments.iter()) {
1205                 match event {
1206                         &Event::PaymentClaimable { ref payment_hash, .. } => {
1207                                 assert_eq!(*payment_hash, *hash);
1208                         },
1209                         _ => panic!("Unexpected event"),
1210                 };
1211         }
1212
1213         for (preimage, _) in payments.drain(..) {
1214                 claim_payment(&nodes[1], &[&nodes[2]], preimage);
1215         }
1216
1217         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
1218 }
1219
1220 #[test]
1221 fn duplicate_htlc_test() {
1222         // Test that we accept duplicate payment_hash HTLCs across the network and that
1223         // claiming/failing each one is handled separately and doesn't affect the others
1224         let chanmon_cfgs = create_chanmon_cfgs(6);
1225         let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
1226         let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
1227         let mut nodes = create_network(6, &node_cfgs, &node_chanmgrs);
1228
1229         // Create some initial channels so payments from 0/1/2 can route via 3 to 4/5
1230         create_announced_chan_between_nodes(&nodes, 0, 3);
1231         create_announced_chan_between_nodes(&nodes, 1, 3);
1232         create_announced_chan_between_nodes(&nodes, 2, 3);
1233         create_announced_chan_between_nodes(&nodes, 3, 4);
1234         create_announced_chan_between_nodes(&nodes, 3, 5);
1235
1236         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
1237
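        // The test harness derives each payment preimage/hash from this counter, so winding it back
        // makes the next route_payment reuse the same preimage (the assert_eq! below relies on that).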
1238         *nodes[0].network_payment_count.borrow_mut() -= 1;
1239         assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage);
1240
1241         *nodes[0].network_payment_count.borrow_mut() -= 1;
1242         assert_eq!(route_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], 1000000).0, payment_preimage);
1243
1244         claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage);
1245         fail_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], payment_hash);
1246         claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage);
1247 }
1248
1249 #[test]
1250 fn test_duplicate_htlc_different_direction_onchain() {
1251         // Test that ChannelMonitor doesn't generate 2 preimage txn
1252         // when we have 2 HTLCs with same preimage that go across a node
1253         // in opposite directions, even with the same payment secret.
1254         let chanmon_cfgs = create_chanmon_cfgs(2);
1255         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1256         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1257         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1258
1259         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1260
1261         // balancing
1262         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
1263
1264         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);
1265
1266         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], 800_000);
1267         let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
1268         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], 800_000, payment_hash, node_a_payment_secret);
1269
1270         // Provide preimage to node 0 by claiming payment
1271         nodes[0].node.claim_funds(payment_preimage);
1272         expect_payment_claimed!(nodes[0], payment_hash, 800_000);
1273         check_added_monitors!(nodes[0], 1);
1274
1275         // Broadcast node 1 commitment txn
1276         let remote_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
1277
1278         assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound
1279         let mut has_both_htlcs = 0; // check htlcs match ones committed
1280         for outp in remote_txn[0].output.iter() {
1281                 if outp.value == 800_000 / 1000 {
1282                         has_both_htlcs += 1;
1283                 } else if outp.value == 900_000 / 1000 {
1284                         has_both_htlcs += 1;
1285                 }
1286         }
1287         assert_eq!(has_both_htlcs, 2);
1288
1289         mine_transaction(&nodes[0], &remote_txn[0]);
1290         check_added_monitors!(nodes[0], 1);
1291         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
1292         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
1293
1294         let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
1295         assert_eq!(claim_txn.len(), 3);
1296
1297         check_spends!(claim_txn[0], remote_txn[0]); // Immediate HTLC claim with preimage
1298         check_spends!(claim_txn[1], remote_txn[0]);
1299         check_spends!(claim_txn[2], remote_txn[0]);
1300         let preimage_tx = &claim_txn[0];
1301         let (preimage_bump_tx, timeout_tx) = if claim_txn[1].input[0].previous_output == preimage_tx.input[0].previous_output {
1302                 (&claim_txn[1], &claim_txn[2])
1303         } else {
1304                 (&claim_txn[2], &claim_txn[1])
1305         };
1306
1307         assert_eq!(preimage_tx.input.len(), 1);
1308         assert_eq!(preimage_bump_tx.input.len(), 1);
1309
1311         assert_eq!(preimage_tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx
1312         assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value, 800);
1313
1314         assert_eq!(timeout_tx.input.len(), 1);
1315         assert_eq!(timeout_tx.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx
1316         check_spends!(timeout_tx, remote_txn[0]);
1317         assert_eq!(remote_txn[0].output[timeout_tx.input[0].previous_output.vout as usize].value, 900);
1318
1319         let events = nodes[0].node.get_and_clear_pending_msg_events();
1320         assert_eq!(events.len(), 3);
1321         for e in events {
1322                 match e {
1323                         MessageSendEvent::BroadcastChannelUpdate { .. } => {},
1324                         MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
1325                                 assert_eq!(node_id, nodes[1].node.get_our_node_id());
1326                                 assert_eq!(msg.data, "Channel closed because commitment or closing transaction was confirmed on chain.");
1327                         },
1328                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
1329                                 assert!(update_add_htlcs.is_empty());
1330                                 assert!(update_fail_htlcs.is_empty());
1331                                 assert_eq!(update_fulfill_htlcs.len(), 1);
1332                                 assert!(update_fail_malformed_htlcs.is_empty());
1333                                 assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
1334                         },
1335                         _ => panic!("Unexpected event"),
1336                 }
1337         }
1338 }
1339
1340 #[test]
1341 fn test_basic_channel_reserve() {
1342         let chanmon_cfgs = create_chanmon_cfgs(2);
1343         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1344         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1345         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1346         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1347
1348         let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
1349         let channel_reserve = chan_stat.channel_reserve_msat;
1350
1351         // The 2* and +1 are for the fee spike reserve.
1352         let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, &get_channel_type_features!(nodes[0], nodes[1], chan.2));
1353         let max_can_send = 5000000 - channel_reserve - commit_tx_fee;
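        // In other words, the most nodes[0] can add is its full 5_000_000 msat balance, less the
        // reserve it must keep and the fee-spike-buffered commitment fee for one more HTLC. Below,
        // the amount is bumped by 1 msat to step just over that limit and trigger the failure.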
1354         let (mut route, our_payment_hash, _, our_payment_secret) =
1355                 get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
1356         route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
1357         let err = nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1358                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).err().unwrap();
1359         match err {
1360                 PaymentSendFailure::AllFailedResendSafe(ref fails) => {
1361                         if let &APIError::ChannelUnavailable { .. } = &fails[0] {}
1362                         else { panic!("Unexpected error variant"); }
1363                 },
1364                 _ => panic!("Unexpected error variant"),
1365         }
1366         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1367
1368         send_payment(&nodes[0], &vec![&nodes[1]], max_can_send);
1369 }
1370
1371 #[test]
1372 fn test_fee_spike_violation_fails_htlc() {
1373         let chanmon_cfgs = create_chanmon_cfgs(2);
1374         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1375         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1376         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1377         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1378
1379         let (mut route, payment_hash, _, payment_secret) =
1380                 get_route_and_payment_hash!(nodes[0], nodes[1], 3460000);
1381         route.paths[0].hops[0].fee_msat += 1;
1382         // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
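        // What follows mirrors the normal send path: derive per-hop onion keys from a session key,
        // build the onion payloads for the amount we actually want to add, assemble the onion
        // packet, and wrap it all in a hand-rolled update_add_htlc.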
1383         let secp_ctx = Secp256k1::new();
1384         let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!");
1385
1386         let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
1387
1388         let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
1389         let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
1390                 3460001, RecipientOnionFields::secret_only(payment_secret), cur_height, &None).unwrap();
1391         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
1392         let msg = msgs::UpdateAddHTLC {
1393                 channel_id: chan.2,
1394                 htlc_id: 0,
1395                 amount_msat: htlc_msat,
1396                 payment_hash: payment_hash,
1397                 cltv_expiry: htlc_cltv,
1398                 onion_routing_packet: onion_packet,
1399                 skimmed_fee_msat: None,
1400         };
1401
1402         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
1403
1404         // Now manually create the commitment_signed message corresponding to the update_add
1405         // nodes[0] just sent. In the code for construction of this message, "local" refers
1406         // to the sender of the message, and "remote" refers to the receiver.
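        // Concretely: pull each side's basepoints and per-commitment point out of its signer, derive
        // the TxCreationKeys for nodes[1]'s next commitment transaction, rebuild that commitment
        // (including the new HTLC), and sign it the way nodes[0] would.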
1407
1408         let feerate_per_kw = get_feerate!(nodes[0], nodes[1], chan.2);
1409
1410         const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
1411
1412         // Get the EnforcingSigner for each channel, which will be used to (1) get the keys
1413         // needed to sign the new commitment tx and (2) sign the new commitment tx.
1414         let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = {
1415                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
1416                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
1417                 let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
1418                 let chan_signer = local_chan.get_signer();
1419                 // Make the signer believe we validated another commitment, so we can release the secret
1420                 chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
1421
1422                 let pubkeys = chan_signer.as_ref().pubkeys();
1423                 (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
1424                  chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER),
1425                  chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx),
1426                  chan_signer.as_ref().pubkeys().funding_pubkey)
1427         };
1428         let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
1429                 let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
1430                 let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
1431                 let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
1432                 let chan_signer = remote_chan.get_signer();
1433                 let pubkeys = chan_signer.as_ref().pubkeys();
1434                 (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
1435                  chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
1436                  chan_signer.as_ref().pubkeys().funding_pubkey)
1437         };
1438
1439         // Assemble the set of keys we can use for signatures for our commitment_signed message.
1440         let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
1441                 &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);
1442
1443         // Build the remote commitment transaction so we can sign it, and then later use the
1444         // signature for the commitment_signed message.
1445         let local_chan_balance = 1313;
1446
1447         let accepted_htlc_info = chan_utils::HTLCOutputInCommitment {
1448                 offered: false,
1449                 amount_msat: 3460001,
1450                 cltv_expiry: htlc_cltv,
1451                 payment_hash,
1452                 transaction_output_index: Some(1),
1453         };
1454
1455         let commitment_number = INITIAL_COMMITMENT_NUMBER - 1;
1456
1457         let res = {
1458                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
1459                 let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
1460                 let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap();
1461                 let local_chan_signer = local_chan.get_signer();
1462                 let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
1463                         commitment_number,
1464                         95000,
1465                         local_chan_balance,
1466                         local_funding, remote_funding,
1467                         commit_tx_keys.clone(),
1468                         feerate_per_kw,
1469                         &mut vec![(accepted_htlc_info, ())],
1470                         &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
1471                 );
1472                 local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
1473         };
1474
1475         let commit_signed_msg = msgs::CommitmentSigned {
1476                 channel_id: chan.2,
1477                 signature: res.0,
1478                 htlc_signatures: res.1,
1479                 #[cfg(taproot)]
1480                 partial_signature_with_nonce: None,
1481         };
1482
1483         // Send the commitment_signed message to nodes[1].
1484         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
1485         let _ = nodes[1].node.get_and_clear_pending_msg_events();
1486
1487         // Send the RAA to nodes[1].
1488         let raa_msg = msgs::RevokeAndACK {
1489                 channel_id: chan.2,
1490                 per_commitment_secret: local_secret,
1491                 next_per_commitment_point: next_local_point,
1492                 #[cfg(taproot)]
1493                 next_local_nonce: None,
1494         };
1495         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_msg);
1496
1497         let events = nodes[1].node.get_and_clear_pending_msg_events();
1498         assert_eq!(events.len(), 1);
1499         // Make sure the HTLC failed in the way we expect.
1500         match events[0] {
1501                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, .. } => {
1502                         assert_eq!(update_fail_htlcs.len(), 1);
1503                         update_fail_htlcs[0].clone()
1504                 },
1505                 _ => panic!("Unexpected event"),
1506         };
1507         nodes[1].logger.assert_log("lightning::ln::channel".to_string(),
1508                 format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", ::hex::encode(raa_msg.channel_id)), 1);
1509
1510         check_added_monitors!(nodes[1], 2);
1511 }
1512
1513 #[test]
1514 fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
1515         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1516         // Set things up such that the fundee (nodes[1]) sending any above-dust amount, beyond an
1517         // initial buffer of HTLCs, would result in a channel reserve violation.
1518         // In this test we check that we would be prevented from sending an HTLC in
1519         // this situation.
1520         let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1521         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1522         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1523         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1524         let default_config = UserConfig::default();
1525         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1526
1527         let mut push_amt = 100_000_000;
1528         push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1529
1530         push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
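        // nodes[0] (the funder) is thus left with exactly its channel reserve plus the commitment-fee
        // buffer for MIN_AFFORDABLE_HTLC_COUNT HTLCs; everything else is pushed to nodes[1] at open.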
1531
1532         let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);
1533
1534         // Fetch a route in advance, as we won't be able to fetch one once we can no longer send.
1535         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000);
1536         // Sending exactly enough to hit the reserve amount should be accepted
1537         for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
1538                 let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1539         }
1540
1541         // However, one more HTLC should put us significantly over the reserve amount and fail.
1542         unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
1543                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1544                 ), true, APIError::ChannelUnavailable { .. }, {});
1545         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1546 }
1547
1548 #[test]
1549 fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
1550         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1551         let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1552         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1553         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1554         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1555         let default_config = UserConfig::default();
1556         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1557
1558         // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
1559         // channel reserve violation (so their balance is the channel reserve (1000 sats) plus the
1560         // commitment transaction fee buffer for MIN_AFFORDABLE_HTLC_COUNT HTLCs).
1561         let mut push_amt = 100_000_000;
1562         push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1563         push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1564         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);
1565
1566         // Send four HTLCs to cover the initial push_msat buffer we're required to include
1567         for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
1568                 let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1569         }
1570
1571         let (mut route, payment_hash, _, payment_secret) =
1572                 get_route_and_payment_hash!(nodes[1], nodes[0], 1000);
1573         route.paths[0].hops[0].fee_msat = 700_000;
1574         // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1575         let secp_ctx = Secp256k1::new();
1576         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
1577         let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
1578         let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
1579         let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
1580                 700_000, RecipientOnionFields::secret_only(payment_secret), cur_height, &None).unwrap();
1581         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
1582         let msg = msgs::UpdateAddHTLC {
1583                 channel_id: chan.2,
1584                 htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64,
1585                 amount_msat: htlc_msat,
1586                 payment_hash: payment_hash,
1587                 cltv_expiry: htlc_cltv,
1588                 onion_routing_packet: onion_packet,
1589                 skimmed_fee_msat: None,
1590         };
1591
1592         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
1593         // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
1594         nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
1595         assert_eq!(nodes[0].node.list_channels().len(), 0);
1596         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
1597         assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
1598         check_added_monitors!(nodes[0], 1);
1599         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() },
1600                 [nodes[1].node.get_our_node_id()], 100000);
1601 }
1602
1603 #[test]
1604 fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
1605         // Test that if we receive many dust HTLCs over an outbound channel, they don't count when
1606         // calculating our commitment transaction fee (this was previously broken).
1607         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1608         let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1609
1610         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1611         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1612         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1613         let default_config = UserConfig::default();
1614         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1615
1616         // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
1617         // channel reserve violation (so their balance is the channel reserve (1000 sats) plus the
1618         // commitment transaction fee buffer for MIN_AFFORDABLE_HTLC_COUNT HTLCs).
1619         let mut push_amt = 100_000_000;
1620         push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1621         push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1622         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt);
1623
1624         let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000
1625                 + feerate_per_kw as u64 * htlc_success_tx_weight(&channel_type_features) / 1000 * 1000 - 1;
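        // That is, the largest value a received HTLC can have while still being dust on nodes[0]'s
        // commitment transaction: a received HTLC is dust when its value, less the fee of the
        // HTLC-success transaction that would claim it, falls below the dust limit.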
1626         // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
1627         // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
1628         // commitment transaction fee.
1629         let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], dust_amt);
1630
1631         // Send four HTLCs to cover the initial push_msat buffer we're required to include
1632         for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
1633                 let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1634         }
1635
1636         // One more than the dust amt should fail, however.
1637         let (mut route, our_payment_hash, _, our_payment_secret) =
1638                 get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt);
1639         route.paths[0].hops[0].fee_msat += 1;
1640         unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
1641                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1642                 ), true, APIError::ChannelUnavailable { .. }, {});
1643 }
1644
1645 #[test]
1646 fn test_chan_init_feerate_unaffordability() {
1647         // Test that we will reject channel opens which do not leave enough to pay for any HTLCs due to
1648         // channel reserve and feerate requirements.
1649         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1650         let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1651         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1652         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1653         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1654         let default_config = UserConfig::default();
1655         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1656
1657         // Set the push_msat amount such that nodes[0] will not be able to afford to add even a single
1658         // HTLC.
1659         let mut push_amt = 100_000_000;
1660         push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1661         assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None).unwrap_err(),
1662                 APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() });
1663
1664         // During open, we don't have a "counterparty channel reserve" to check against, so that
1665         // requirement only comes into play on the open_channel handling side.
1666         push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1667         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None).unwrap();
1668         let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
1669         open_channel_msg.push_msat += 1;
1670         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
1671
1672         let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
1673         assert_eq!(msg_events.len(), 1);
1674         match msg_events[0] {
1675                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
1676                         assert_eq!(msg.data, "Insufficient funding amount for initial reserve");
1677                 },
1678                 _ => panic!("Unexpected event"),
1679         }
1680 }
1681
1682 #[test]
1683 fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
1684         // Test that if we receive many dust HTLCs over an inbound channel, they don't count when
1685         // calculating our counterparty's commitment transaction fee (this was previously broken).
1686         let chanmon_cfgs = create_chanmon_cfgs(2);
1687         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1688         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1689         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1690         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000);
1691
1692         let payment_amt = 46000; // Dust amount
1693         // In the previous code, these first four payments would succeed.
1694         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1695         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1696         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1697         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1698
1699         // Then these next 5 would be interpreted by nodes[1] as violating the fee spike buffer.
1700         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1701         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1702         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1703         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1704         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1705
1706         // And this last payment previously resulted in nodes[1] closing on its inbound-channel
1707         // counterparty, because it counted all the previous dust HTLCs against nodes[0]'s commitment
1708         // transaction fee and therefore perceived this next payment as a channel reserve violation.
1709         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1710 }
1711
1712 #[test]
1713 fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
1714         let chanmon_cfgs = create_chanmon_cfgs(3);
1715         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1716         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1717         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1718         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1719         let _ = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);
1720
1721         let feemsat = 239;
1722         let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
1723         let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
1724         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
1725         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
1726
1727         // The 2* and +1 are for the fee spike reserve.
1728         let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
1729         let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc)/2;
1730         let amt_msat_1 = recv_value_1 + total_routing_fee_msat;
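        // recv_value_1 is half of what nodes[0] could send while still covering its channel reserve,
        // the routing fee, and a fee-spike-buffered commitment fee for the two HTLCs this test adds.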
1731
1732         // Add a pending HTLC.
1733         let (route_1, our_payment_hash_1, _, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_1);
1734         let payment_event_1 = {
1735                 nodes[0].node.send_payment_with_route(&route_1, our_payment_hash_1,
1736                         RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
1737                 check_added_monitors!(nodes[0], 1);
1738
1739                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1740                 assert_eq!(events.len(), 1);
1741                 SendEvent::from_event(events.remove(0))
1742         };
1743         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
1744
1745         // Attempt to trigger a channel reserve violation --> payment failure.
1746         let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, &channel_type_features);
1747         let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1;
1748         let amt_msat_2 = recv_value_2 + total_routing_fee_msat;
1749         let mut route_2 = route_1.clone();
1750         route_2.paths[0].hops.last_mut().unwrap().fee_msat = amt_msat_2;
1751
1752         // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1753         let secp_ctx = Secp256k1::new();
1754         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
1755         let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
1756         let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap();
1757         let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
1758                 &route_2.paths[0], recv_value_2, RecipientOnionFields::spontaneous_empty(), cur_height, &None).unwrap();
1759         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1).unwrap();
1760         let msg = msgs::UpdateAddHTLC {
1761                 channel_id: chan.2,
1762                 htlc_id: 1,
1763                 amount_msat: htlc_msat + 1,
1764                 payment_hash: our_payment_hash_1,
1765                 cltv_expiry: htlc_cltv,
1766                 onion_routing_packet: onion_packet,
1767                 skimmed_fee_msat: None,
1768         };
1769
1770         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
1771         // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
1772         nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote HTLC add would put them under remote reserve value".to_string(), 1);
1773         assert_eq!(nodes[1].node.list_channels().len(), 1);
1774         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
1775         assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
1776         check_added_monitors!(nodes[1], 1);
1777         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() },
1778                 [nodes[0].node.get_our_node_id()], 100000);
1779 }
1780
1781 #[test]
1782 fn test_inbound_outbound_capacity_is_not_zero() {
1783         let chanmon_cfgs = create_chanmon_cfgs(2);
1784         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1785         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1786         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1787         let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1788         let channels0 = node_chanmgrs[0].list_channels();
1789         let channels1 = node_chanmgrs[1].list_channels();
1790         let default_config = UserConfig::default();
1791         assert_eq!(channels0.len(), 1);
1792         assert_eq!(channels1.len(), 1);
1793
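        // Each side's advertised capacity is simply the corresponding balance less the reserve that
        // side is required to maintain, which is what the assertions below spell out.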
1794         let reserve = get_holder_selected_channel_reserve_satoshis(100_000, &default_config);
1795         assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve*1000);
1796         assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve*1000);
1797
1798         assert_eq!(channels0[0].outbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
1799         assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
1800 }
1801
1802 fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, channel_type_features: &ChannelTypeFeatures) -> u64 {
1803         (commitment_tx_base_weight(channel_type_features) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
1804 }
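// For intuition, a rough worked example (assuming a non-anchors commitment, where the base weight is
// 724 WU and each non-dust HTLC adds 172 WU, and the test harness's default feerate of 253 sat/kW):
// with 2 HTLCs the fee is (724 + 2 * 172) * 253 / 1000 = 270 sats, which the helper scales back up to
// 270_000 msat. Note the integer division rounds the fee down to whole sats.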
1805
1806 #[test]
1807 fn test_channel_reserve_holding_cell_htlcs() {
1808         let chanmon_cfgs = create_chanmon_cfgs(3);
1809         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1810         // When this test was written, the default base fee floated based on the HTLC count.
1811         // It is now fixed, so we simply set the fee to the expected value here.
1812         let mut config = test_default_channel_config();
1813         config.channel_config.forwarding_fee_base_msat = 239;
1814         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
1815         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1816         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001);
1817         let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 190000, 1001);
1818
1819         let mut stat01 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
1820         let mut stat11 = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
1821
1822         let mut stat12 = get_channel_value_stat!(nodes[1], nodes[2], chan_2.2);
1823         let mut stat22 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2);
1824
1825         macro_rules! expect_forward {
1826                 ($node: expr) => {{
1827                         let mut events = $node.node.get_and_clear_pending_msg_events();
1828                         assert_eq!(events.len(), 1);
1829                         check_added_monitors!($node, 1);
1830                         let payment_event = SendEvent::from_event(events.remove(0));
1831                         payment_event
1832                 }}
1833         }
1834
1835         let feemsat = 239; // set above
1836         let total_fee_msat = (nodes.len() - 2) as u64 * feemsat;
1837         let feerate = get_feerate!(nodes[0], nodes[1], chan_1.2);
1838         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_1.2);
1839
1840         let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat;
1841
1842         // attempt to send amt_msat > their_max_htlc_value_in_flight_msat
1843         {
1844                 let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
1845                         .with_bolt11_features(nodes[2].node.invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
1846                 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0);
1847                 route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
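                // For the final hop, `fee_msat` is the amount delivered to the recipient, so bumping it by
                // one pushes the HTLC one msat over the counterparty's max-in-flight limit.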
1848                 assert!(route.paths[0].hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
1849
1850                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1851                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1852                         ), true, APIError::ChannelUnavailable { .. }, {});
1853                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1854         }
1855
1856         // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete
1857         // nodes[0]'s wealth
1858         loop {
1859                 let amt_msat = recv_value_0 + total_fee_msat;
1860                 // 3 for the 3 HTLCs that will be sent; the 2* and the +1 account for the fee spike reserve.
1861                 // Also, make sure each payment is large enough to be above the dust limit so that
1862                 // it'll be included in each commit tx fee calculation.
1863                 let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
1864                 let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000);
1865                 if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat {
1866                         break;
1867                 }
1868
1869                 let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
1870                         .with_bolt11_features(nodes[2].node.invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
1871                 let route = get_route!(nodes[0], payment_params, recv_value_0).unwrap();
1872                 let (payment_preimage, ..) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0);
1873                 claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
1874
1875                 let (stat01_, stat11_, stat12_, stat22_) = (
1876                         get_channel_value_stat!(nodes[0], nodes[1], chan_1.2),
1877                         get_channel_value_stat!(nodes[1], nodes[0], chan_1.2),
1878                         get_channel_value_stat!(nodes[1], nodes[2], chan_2.2),
1879                         get_channel_value_stat!(nodes[2], nodes[1], chan_2.2),
1880                 );
1881
1882                 assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
1883                 assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
1884                 assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
1885                 assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
1886                 stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
1887         }
1888
1889         // Now add a pending HTLC output. As above, the 2* and the +1 HTLC in the commit tx fee
1890         // calculation account for the fee spike reserve.
1891         // The reason we're dividing by two here is as follows: the dividend is the total outbound liquidity
1892         // after fees, the channel reserve, and the fee spike buffer are removed. We eventually want to
1893         // divide this quantity into 3 portions, that will each be sent in an HTLC. This allows us
1894         // to test channel reserve policy at the edges of what amount is sendable, i.e.
1895         // cases where 1 msat over X amount will cause a payment failure, but anything less than
1896         // that can be sent successfully. So, dividing by two is a somewhat arbitrary way of getting
1897         // the amount of the first of these aforementioned 3 payments. The reason we split into 3 payments
1898         // is to test the behavior of the holding cell with respect to channel reserve and commit tx fee
1899         // policy.
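        // Plan: send recv_value_1 immediately, then queue recv_value_21 and recv_value_22 while the
        // first HTLC's commitment dance is still in flight, so those two sit in the holding cell.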
1900         let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
1901         let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2;
1902         let amt_msat_1 = recv_value_1 + total_fee_msat;
1903
1904         let (route_1, our_payment_hash_1, our_payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_1);
1905         let payment_event_1 = {
1906                 nodes[0].node.send_payment_with_route(&route_1, our_payment_hash_1,
1907                         RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
1908                 check_added_monitors!(nodes[0], 1);
1909
1910                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1911                 assert_eq!(events.len(), 1);
1912                 SendEvent::from_event(events.remove(0))
1913         };
1914         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
1915
1916         // channel reserve test with htlc pending output > 0
1917         let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs;
1918         {
1919                 let mut route = route_1.clone();
1920                 route.paths[0].hops.last_mut().unwrap().fee_msat = recv_value_2 + 1;
1921                 let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
1922                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1923                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1924                         ), true, APIError::ChannelUnavailable { .. }, {});
1925                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1926         }
1927
1928         // split the rest to test holding cell
1929         let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
1930         let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs;
1931         let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2;
1932         let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat;
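        // This split is chosen so that, once both of these are sent, nodes[0] sits exactly at its
        // channel reserve (checked just below), letting us probe the one-msat-over failure cases.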
1933         {
1934                 let stat = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
1935                 assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat + commit_tx_fee_3_htlcs), stat.channel_reserve_msat);
1936         }
1937
1938         // now see if they go through on both sides
1939         let (route_21, our_payment_hash_21, our_payment_preimage_21, our_payment_secret_21) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_21);
1940         // but this one will get stuck in the holding cell
1941         nodes[0].node.send_payment_with_route(&route_21, our_payment_hash_21,
1942                 RecipientOnionFields::secret_only(our_payment_secret_21), PaymentId(our_payment_hash_21.0)).unwrap();
1943         check_added_monitors!(nodes[0], 0);
1944         let events = nodes[0].node.get_and_clear_pending_events();
1945         assert_eq!(events.len(), 0);
1946
1947         // test with outbound holding cell amount > 0
1948         {
1949                 let (mut route, our_payment_hash, _, our_payment_secret) =
1950                         get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
1951                 route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
1952                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1953                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1954                         ), true, APIError::ChannelUnavailable { .. }, {});
1955                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1956         }
1957
1958         let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
1959         // this one will also get stuck in the holding cell
1960         nodes[0].node.send_payment_with_route(&route_22, our_payment_hash_22,
1961                 RecipientOnionFields::secret_only(our_payment_secret_22), PaymentId(our_payment_hash_22.0)).unwrap();
1962         check_added_monitors!(nodes[0], 0);
1963         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
1964         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1965
1966         // flush the pending htlc
1967         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg);
1968         let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1969         check_added_monitors!(nodes[1], 1);
1970
1971         // the pending htlc should be promoted to committed
1972         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
1973         check_added_monitors!(nodes[0], 1);
1974         let commitment_update_2 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1975
1976         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed);
1977         let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1978         // No commitment_signed so get_event_msg's assert(len == 1) passes
1979         check_added_monitors!(nodes[0], 1);
1980
1981         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack);
1982         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1983         check_added_monitors!(nodes[1], 1);
1984
1985         expect_pending_htlcs_forwardable!(nodes[1]);
1986
1987         let ref payment_event_11 = expect_forward!(nodes[1]);
1988         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]);
1989         commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
1990
1991         expect_pending_htlcs_forwardable!(nodes[2]);
1992         expect_payment_claimable!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1);
1993
1994         // flush the htlcs in the holding cell
1995         assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
1996         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]);
1997         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]);
1998         commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
1999         expect_pending_htlcs_forwardable!(nodes[1]);
2000
2001         let ref payment_event_3 = expect_forward!(nodes[1]);
2002         assert_eq!(payment_event_3.msgs.len(), 2);
2003         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]);
2004         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]);
2005
2006         commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
2007         expect_pending_htlcs_forwardable!(nodes[2]);
2008
2009         let events = nodes[2].node.get_and_clear_pending_events();
2010         assert_eq!(events.len(), 2);
2011         match events[0] {
2012                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
2013                         assert_eq!(our_payment_hash_21, *payment_hash);
2014                         assert_eq!(recv_value_21, amount_msat);
2015                         assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
2016                         assert_eq!(via_channel_id, Some(chan_2.2));
2017                         match &purpose {
2018                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
2019                                         assert!(payment_preimage.is_none());
2020                                         assert_eq!(our_payment_secret_21, *payment_secret);
2021                                 },
2022                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
2023                         }
2024                 },
2025                 _ => panic!("Unexpected event"),
2026         }
2027         match events[1] {
2028                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
2029                         assert_eq!(our_payment_hash_22, *payment_hash);
2030                         assert_eq!(recv_value_22, amount_msat);
2031                         assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
2032                         assert_eq!(via_channel_id, Some(chan_2.2));
2033                         match &purpose {
2034                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
2035                                         assert!(payment_preimage.is_none());
2036                                         assert_eq!(our_payment_secret_22, *payment_secret);
2037                                 },
2038                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
2039                         }
2040                 },
2041                 _ => panic!("Unexpected event"),
2042         }
2043
2044         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
2045         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
2046         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
2047
2048         let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, &channel_type_features);
2049         let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat;
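        // recv_value_3 drains nodes[0] down to exactly its reserve plus the (fee-spike-buffered)
        // single-HTLC commitment fee, as asserted below.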
2050         send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3);
2051
2052         let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
2053         let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat);
2054         let stat0 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
2055         assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
2056         assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat + commit_tx_fee_1_htlc);
2057
2058         let stat2 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2);
2059         assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3);
2060 }
2061
2062 #[test]
2063 fn channel_reserve_in_flight_removes() {
2064         // In cases where one side claims an HTLC, it thinks it has additional available funds that it
2065         // can send to its counterparty, but due to update ordering, the other side may not yet have
2066         // considered those HTLCs fully removed.
2067         // This tests that we don't count HTLCs which will not be included in the next remote
2068         // commitment transaction towards the reserve value (as it implies no commitment transaction
2069         // will be generated which violates the remote reserve value).
2070         // This was broken previously, and discovered by the chanmon_fail_consistency fuzz test.
2071         // To test this we:
2072         //  * route two HTLCs from A to B (note that, at a high level, this test is checking that, when
2073         //    you consider the values of both of these HTLCs, B may send an HTLC back to A, but if
2074         //    you only consider the value of the first HTLC, it may not),
2075         //  * start routing a third HTLC from A to B,
2076         //  * claim the first two HTLCs (though B will generate an update_fulfill for one, and put
2077         //    the other claim in its holding cell, as it immediately goes into AwaitingRAA),
2078         //  * deliver the first fulfill from B
2079         //  * deliver the update_add and an RAA from A, resulting in B freeing the second holding cell
2080         //    claim,
2081         //  * deliver A's response CS and RAA.
2082         //    This results in A having the second HTLC in AwaitingRemovedRemoteRevoke, but B having
2083         //    removed it fully. B now has the push_msat plus the first two HTLCs in value.
2084         //  * Now B happily sends another HTLC, potentially violating its reserve value from A's point
2085         //    of view (if A counts the AwaitingRemovedRemoteRevoke HTLC).
2086         let chanmon_cfgs = create_chanmon_cfgs(2);
2087         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2088         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2089         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2090         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2091
2092         let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
2093         // Route the first two HTLCs.
2094         let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000;
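        // payment_value_1 leaves B 10_000 msat short of its reserve once the first HTLC alone is
        // claimed; only with the second (20_000 msat) HTLC also counted can B send 10_000 msat back.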
2095         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1);
2096         let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 20_000);
2097
2098         // Start routing the third HTLC (this is just used to get everyone in the right state).
2099         let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
2100         let send_1 = {
2101                 nodes[0].node.send_payment_with_route(&route, payment_hash_3,
2102                         RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
2103                 check_added_monitors!(nodes[0], 1);
2104                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2105                 assert_eq!(events.len(), 1);
2106                 SendEvent::from_event(events.remove(0))
2107         };
2108
2109         // Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an
2110         // initial fulfill/CS.
2111         nodes[1].node.claim_funds(payment_preimage_1);
2112         expect_payment_claimed!(nodes[1], payment_hash_1, payment_value_1);
2113         check_added_monitors!(nodes[1], 1);
2114         let bs_removes = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2115
2116         // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not
2117         // remove the second HTLC when we send the HTLC back from B to A.
2118         nodes[1].node.claim_funds(payment_preimage_2);
2119         expect_payment_claimed!(nodes[1], payment_hash_2, 20_000);
2120         check_added_monitors!(nodes[1], 1);
2121         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2122
2123         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]);
2124         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed);
2125         check_added_monitors!(nodes[0], 1);
2126         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2127         expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
2128
2129         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]);
2130         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg);
2131         check_added_monitors!(nodes[1], 1);
2132         // B is already AwaitingRAA, so it can't generate a CS here
2133         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2134
2135         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2136         check_added_monitors!(nodes[1], 1);
2137         let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2138
2139         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2140         check_added_monitors!(nodes[0], 1);
2141         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2142
2143         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2144         check_added_monitors!(nodes[1], 1);
2145         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2146
2147         // The second HTLC is removed, but as A is in AwaitingRAA it can't generate a CS here, so the
2148         // RAA that B generated above doesn't fully resolve the second HTLC from A's point of view.
2149         // However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
2150         // can no longer broadcast a commitment transaction with it and B has the preimage so can go
2151         // on-chain as necessary).
2152         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]);
2153         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
2154         check_added_monitors!(nodes[0], 1);
2155         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2156         expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
2157
2158         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2159         check_added_monitors!(nodes[1], 1);
2160         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2161
2162         expect_pending_htlcs_forwardable!(nodes[1]);
2163         expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 100000);
2164
2165         // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
2166         // resolve the second HTLC from A's point of view.
2167         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2168         check_added_monitors!(nodes[0], 1);
2169         expect_payment_path_successful!(nodes[0]);
2170         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2171
2172         // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
2173         // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing.
2174         let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[1], nodes[0], 10000);
2175         let send_2 = {
2176                 nodes[1].node.send_payment_with_route(&route, payment_hash_4,
2177                         RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap();
2178                 check_added_monitors!(nodes[1], 1);
2179                 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
2180                 assert_eq!(events.len(), 1);
2181                 SendEvent::from_event(events.remove(0))
2182         };
2183
2184         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_2.msgs[0]);
2185         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_2.commitment_msg);
2186         check_added_monitors!(nodes[0], 1);
2187         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2188
2189         // Now just resolve all the outstanding messages/HTLCs for completeness...
2190
2191         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2192         check_added_monitors!(nodes[1], 1);
2193         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2194
2195         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2196         check_added_monitors!(nodes[1], 1);
2197
2198         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2199         check_added_monitors!(nodes[0], 1);
2200         expect_payment_path_successful!(nodes[0]);
2201         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2202
2203         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2204         check_added_monitors!(nodes[1], 1);
2205         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2206
2207         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2208         check_added_monitors!(nodes[0], 1);
2209
2210         expect_pending_htlcs_forwardable!(nodes[0]);
2211         expect_payment_claimable!(nodes[0], payment_hash_4, payment_secret_4, 10000);
2212
2213         claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
2214         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
2215 }
2216
2217 #[test]
2218 fn channel_monitor_network_test() {
2219         // Simple test which builds a network of ChannelManagers, connects them to each other, and
2220         // tests that ChannelMonitor is able to recover from various states.
2221         let chanmon_cfgs = create_chanmon_cfgs(5);
2222         let node_cfgs = create_node_cfgs(5, &chanmon_cfgs);
2223         let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, None, None, None, None]);
2224         let nodes = create_network(5, &node_cfgs, &node_chanmgrs);
2225
2226         // Create some initial channels
2227         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2228         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
2229         let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
2230         let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
2231
2232         // Make sure all nodes are at the same starting height
2233         connect_blocks(&nodes[0], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
2234         connect_blocks(&nodes[1], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
2235         connect_blocks(&nodes[2], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
2236         connect_blocks(&nodes[3], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[3].best_block_info().1);
2237         connect_blocks(&nodes[4], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[4].best_block_info().1);
2238
2239         // Rebalance the network a bit by relaying one payment through all the channels...
2240         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2241         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2242         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2243         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2244
2245         // Simple case with no pending HTLCs:
2246         nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
2247         check_added_monitors!(nodes[1], 1);
2248         check_closed_broadcast!(nodes[1], true);
2249         {
2250                 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
2251                 assert_eq!(node_txn.len(), 1);
2252                 mine_transaction(&nodes[0], &node_txn[0]);
2253                 check_added_monitors!(nodes[0], 1);
2254                 test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE);
2255         }
2256         check_closed_broadcast!(nodes[0], true);
2257         assert_eq!(nodes[0].node.list_channels().len(), 0);
2258         assert_eq!(nodes[1].node.list_channels().len(), 1);
2259         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2260         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
2261
2262         // One pending HTLC is discarded by the force-close:
2263         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
2264
2265         // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
2266         // broadcast until we reach the timelock height).
2267         nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
2268         check_closed_broadcast!(nodes[1], true);
2269         check_added_monitors!(nodes[1], 1);
2270         {
2271                 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE);
2272                 connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
2273                 test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
2274                 mine_transaction(&nodes[2], &node_txn[0]);
2275                 check_added_monitors!(nodes[2], 1);
2276                 test_txn_broadcast(&nodes[2], &chan_2, Some(node_txn[0].clone()), HTLCType::NONE);
2277         }
2278         check_closed_broadcast!(nodes[2], true);
2279         assert_eq!(nodes[1].node.list_channels().len(), 0);
2280         assert_eq!(nodes[2].node.list_channels().len(), 1);
2281         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
2282         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2283
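        // Claims the given payment on $node and asserts it queues a single commitment update to
        // $prev_node containing no HTLC adds or fails (i.e. just the fulfill).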
2284         macro_rules! claim_funds {
2285                 ($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => {
2286                         {
2287                                 $node.node.claim_funds($preimage);
2288                                 expect_payment_claimed!($node, $payment_hash, 3_000_000);
2289                                 check_added_monitors!($node, 1);
2290
2291                                 let events = $node.node.get_and_clear_pending_msg_events();
2292                                 assert_eq!(events.len(), 1);
2293                                 match events[0] {
2294                                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => {
2295                                                 assert!(update_add_htlcs.is_empty());
2296                                                 assert!(update_fail_htlcs.is_empty());
2297                                                 assert_eq!(*node_id, $prev_node.node.get_our_node_id());
2298                                         },
2299                                         _ => panic!("Unexpected event"),
2300                                 };
2301                         }
2302                 }
2303         }
2304
2305         // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
2306         // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
2307         nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
2308         check_added_monitors!(nodes[2], 1);
2309         check_closed_broadcast!(nodes[2], true);
2310         let node2_commitment_txid;
2311         {
2312                 let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
2313                 connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
2314                 test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
2315                 node2_commitment_txid = node_txn[0].txid();
2316
2317                 // Claim the payment on nodes[3], giving it knowledge of the preimage
2318                 claim_funds!(nodes[3], nodes[2], payment_preimage_1, payment_hash_1);
2319                 mine_transaction(&nodes[3], &node_txn[0]);
2320                 check_added_monitors!(nodes[3], 1);
2321                 check_preimage_claim(&nodes[3], &node_txn);
2322         }
2323         check_closed_broadcast!(nodes[3], true);
2324         assert_eq!(nodes[2].node.list_channels().len(), 0);
2325         assert_eq!(nodes[3].node.list_channels().len(), 1);
2326         check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
2327         check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
2328
2329         // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
2330         // confusing us in the following tests.
2331         let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&OutPoint { txid: chan_3.3.txid(), index: 0 });
2332
2333         // One pending HTLC to time out:
2334         let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[3], &[&nodes[4]], 3_000_000);
2335         // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
2336         // buffer space).
2337
2338         let (close_chan_update_1, close_chan_update_2) = {
2339                 connect_blocks(&nodes[3], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
2340                 let events = nodes[3].node.get_and_clear_pending_msg_events();
2341                 assert_eq!(events.len(), 2);
2342                 let close_chan_update_1 = match events[0] {
2343                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2344                                 msg.clone()
2345                         },
2346                         _ => panic!("Unexpected event"),
2347                 };
2348                 match events[1] {
2349                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id } => {
2350                                 assert_eq!(node_id, nodes[4].node.get_our_node_id());
2351                         },
2352                         _ => panic!("Unexpected event"),
2353                 }
2354                 check_added_monitors!(nodes[3], 1);
2355
2356                 // Clear any fee-bumped claiming txn spending the node 2 commitment tx. Bumped txn are generated once a height-based timer elapses.
2357                 {
2358                         let mut node_txn = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap();
2359                         node_txn.retain(|tx| {
2360                                 if tx.input[0].previous_output.txid == node2_commitment_txid {
2361                                         false
2362                                 } else { true }
2363                         });
2364                 }
2365
2366                 let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT);
2367
2368                 // Claim the payment on nodes[4], giving it knowledge of the preimage
2369                 claim_funds!(nodes[4], nodes[3], payment_preimage_2, payment_hash_2);
2370
2371                 connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
2372                 let events = nodes[4].node.get_and_clear_pending_msg_events();
2373                 assert_eq!(events.len(), 2);
2374                 let close_chan_update_2 = match events[0] {
2375                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2376                                 msg.clone()
2377                         },
2378                         _ => panic!("Unexpected event"),
2379                 };
2380                 match events[1] {
2381                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id } => {
2382                                 assert_eq!(node_id, nodes[3].node.get_our_node_id());
2383                         },
2384                         _ => panic!("Unexpected event"),
2385                 }
2386                 check_added_monitors!(nodes[4], 1);
2387                 test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
2388
2389                 mine_transaction(&nodes[4], &node_txn[0]);
2390                 check_preimage_claim(&nodes[4], &node_txn);
2391                 (close_chan_update_1, close_chan_update_2)
2392         };
2393         nodes[3].gossip_sync.handle_channel_update(&close_chan_update_2).unwrap();
2394         nodes[4].gossip_sync.handle_channel_update(&close_chan_update_1).unwrap();
2395         assert_eq!(nodes[3].node.list_channels().len(), 0);
2396         assert_eq!(nodes[4].node.list_channels().len(), 0);
2397
2398         assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
2399                 ChannelMonitorUpdateStatus::Completed);
2400         check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[4].node.get_our_node_id()], 100000);
2401         check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed, [nodes[3].node.get_our_node_id()], 100000);
2402 }
2403
2404 #[test]
2405 fn test_justice_tx_htlc_timeout() {
2406         // Test justice txn built on revoked HTLC-Timeout tx, against both sides
2407         let mut alice_config = UserConfig::default();
2408         alice_config.channel_handshake_config.announced_channel = true;
2409         alice_config.channel_handshake_limits.force_announced_channel_preference = false;
2410         alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
2411         let mut bob_config = UserConfig::default();
2412         bob_config.channel_handshake_config.announced_channel = true;
2413         bob_config.channel_handshake_limits.force_announced_channel_preference = false;
2414         bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
2415         let user_cfgs = [Some(alice_config), Some(bob_config)];
2416         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2417         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2418         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
2419         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2420         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
2421         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2422         // Create some new channels:
2423         let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1);
2424
2425         // A pending HTLC which will be revoked:
2426         let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2427         // Get the will-be-revoked local txn from nodes[0]
2428         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_5.2);
2429         assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
2430         assert_eq!(revoked_local_txn[0].input.len(), 1);
2431         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
2432         assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to 0 are present
2433         assert_eq!(revoked_local_txn[1].input.len(), 1);
2434         assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2435         assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
2436         // Revoke the old state
2437         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
2438
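        // First nodes[1] sweeps the revoked commitment directly with a penalty tx. Then nodes[0]
        // broadcasts its (revoked) HTLC-Timeout once the HTLC expires, and nodes[1] builds a second
        // justice tx against that HTLC-Timeout output.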
2439         {
2440                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2441                 {
2442                         let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
2443                         assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2444                         assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output
2445                         check_spends!(node_txn[0], revoked_local_txn[0]);
2446                         node_txn.swap_remove(0);
2447                 }
2448                 check_added_monitors!(nodes[1], 1);
2449                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2450                 test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
2451
2452                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2453                 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
2454                 // Verify broadcast of revoked HTLC-timeout
2455                 let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
2456                 check_added_monitors!(nodes[0], 1);
2457                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2458                 // Broadcast revoked HTLC-timeout on node 1
2459                 mine_transaction(&nodes[1], &node_txn[1]);
2460                 test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
2461         }
2462         get_announce_close_broadcast_events(&nodes, 0, 1);
2463         assert_eq!(nodes[0].node.list_channels().len(), 0);
2464         assert_eq!(nodes[1].node.list_channels().len(), 0);
2465 }
2466
2467 #[test]
2468 fn test_justice_tx_htlc_success() {
2469         // Test justice txn built on revoked HTLC-Success tx, against both sides
2470         let mut alice_config = UserConfig::default();
2471         alice_config.channel_handshake_config.announced_channel = true;
2472         alice_config.channel_handshake_limits.force_announced_channel_preference = false;
2473         alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
2474         let mut bob_config = UserConfig::default();
2475         bob_config.channel_handshake_config.announced_channel = true;
2476         bob_config.channel_handshake_limits.force_announced_channel_preference = false;
2477         bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
2478         let user_cfgs = [Some(alice_config), Some(bob_config)];
2479         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2480         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2481         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
2482         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2483         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
2484         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2485         // Create some new channels:
2486         let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1);
2487
2488         // A pending HTLC which will be revoked:
2489         let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2490         // Get the will-be-revoked local txn from B
2491         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_6.2);
2492         assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx
2493         assert_eq!(revoked_local_txn[0].input.len(), 1);
2494         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.txid());
2495         assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to A are present
2496         // Revoke the old state
2497         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4);
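        // Mirror of the timeout case: nodes[0] sweeps the revoked commitment with a penalty tx, then
        // nodes[1] broadcasts its (revoked) HTLC-Success and nodes[0] claims against that as well.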
2498         {
2499                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2500                 {
2501                         let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
2502                         assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2503                         assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output
2504
2505                         check_spends!(node_txn[0], revoked_local_txn[0]);
2506                         node_txn.swap_remove(0);
2507                 }
2508                 check_added_monitors!(nodes[0], 1);
2509                 test_txn_broadcast(&nodes[0], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
2510
2511                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2512                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2513                 let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
2514                 check_added_monitors!(nodes[1], 1);
2515                 mine_transaction(&nodes[0], &node_txn[1]);
2516                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2517                 test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
2518         }
2519         get_announce_close_broadcast_events(&nodes, 0, 1);
2520         assert_eq!(nodes[0].node.list_channels().len(), 0);
2521         assert_eq!(nodes[1].node.list_channels().len(), 0);
2522 }
2523
2524 #[test]
2525 fn revoked_output_claim() {
2526         // Simple test to ensure a node will claim a revoked output when a stale remote commitment
2527         // transaction is broadcast by its counterparty
2528         let chanmon_cfgs = create_chanmon_cfgs(2);
2529         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2530         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2531         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2532         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2533         // node[0] is going to revoke an old state, thus node[1] should be able to claim the revoked output
2534         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2535         assert_eq!(revoked_local_txn.len(), 1);
2536         // Only output is the full channel value back to nodes[0]:
2537         assert_eq!(revoked_local_txn[0].output.len(), 1);
2538         // Send a payment through, updating everyone's latest commitment txn
2539         send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);
2540
2541         // Inform nodes[1] that nodes[0] broadcast a stale tx
2542         mine_transaction(&nodes[1], &revoked_local_txn[0]);
2543         check_added_monitors!(nodes[1], 1);
2544         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2545         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
2546         assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output
2547
2548         check_spends!(node_txn[0], revoked_local_txn[0]);
2549
2550         // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
2551         mine_transaction(&nodes[0], &revoked_local_txn[0]);
2552         get_announce_close_broadcast_events(&nodes, 0, 1);
2553         check_added_monitors!(nodes[0], 1);
2554         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2555 }
2556
2557 #[test]
2558 fn test_forming_justice_tx_from_monitor_updates() {
2559         do_test_forming_justice_tx_from_monitor_updates(true);
2560         do_test_forming_justice_tx_from_monitor_updates(false);
2561 }
2562
2563 fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment: bool) {
2564         // Simple test to make sure that the justice tx built by the WatchtowerPersister
2565         // is properly formed and can be broadcast/confirmed successfully in the event
2566         // that a revoked commitment transaction is broadcast
2567         // (Similar to `revoked_output_claim` test but we get the justice tx + broadcast manually)
2568         let chanmon_cfgs = create_chanmon_cfgs(2);
2569         let destination_script0 = chanmon_cfgs[0].keys_manager.get_destination_script().unwrap();
2570         let destination_script1 = chanmon_cfgs[1].keys_manager.get_destination_script().unwrap();
2571         let persisters = vec![WatchtowerPersister::new(destination_script0),
2572                 WatchtowerPersister::new(destination_script1)];
2573         let node_cfgs = create_node_cfgs_with_persisters(2, &chanmon_cfgs, persisters.iter().collect());
2574         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2575         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2576         let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
2577         let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
2578
2579         if !broadcast_initial_commitment {
2580                 // Send a payment to move the channel forward
2581                 send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000);
2582         }
2583
2584         // node[0] is going to revoke an old state, thus node[1] should be able to claim the revoked output.
2585         // We'll keep this commitment transaction to broadcast once it's revoked.
2586         let revoked_local_txn = get_local_commitment_txn!(nodes[0], channel_id);
2587         assert_eq!(revoked_local_txn.len(), 1);
2588         let revoked_commitment_tx = &revoked_local_txn[0];
2589
2590         // Send another payment, now revoking the previous commitment tx
2591         send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000);
2592
2593         let justice_tx = persisters[1].justice_tx(funding_txo, &revoked_commitment_tx.txid()).unwrap();
2594         check_spends!(justice_tx, revoked_commitment_tx);
2595
2596         mine_transactions(&nodes[1], &[revoked_commitment_tx, &justice_tx]);
2597         mine_transactions(&nodes[0], &[revoked_commitment_tx, &justice_tx]);
2598
2599         check_added_monitors!(nodes[1], 1);
2600         check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
2601                 &[nodes[0].node.get_our_node_id()], 100_000);
2602         get_announce_close_broadcast_events(&nodes, 1, 0);
2603
2604         check_added_monitors!(nodes[0], 1);
2605         check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false,
2606                 &[nodes[1].node.get_our_node_id()], 100_000);
2607
2608         // Check that the justice tx has sent the revoked output value to nodes[1]
2609         let monitor = get_monitor!(nodes[1], channel_id);
2610         let total_claimable_balance = monitor.get_claimable_balances().iter().fold(0, |sum, balance| {
2611                 match balance {
2612                         channelmonitor::Balance::ClaimableAwaitingConfirmations { amount_satoshis, .. } => sum + amount_satoshis,
2613                         _ => panic!("Unexpected balance type"),
2614                 }
2615         });
2616         // On the first commitment, node[1]'s balance was below dust so it didn't have an output
2617         let node1_channel_balance = if broadcast_initial_commitment { 0 } else { revoked_commitment_tx.output[0].value };
2618         let expected_claimable_balance = node1_channel_balance + justice_tx.output[0].value;
2619         assert_eq!(total_claimable_balance, expected_claimable_balance);
2620 }
2621
2622
2623 #[test]
2624 fn claim_htlc_outputs_shared_tx() {
2625         // Node revoked an old state; the HTLCs haven't timed out yet, so claim them in a shared justice tx
2626         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2627         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2628         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2629         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2630         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2631
2632         // Create some new channel:
2633         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2634
2635         // Rebalance the network to generate an HTLC in each direction
2636         send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
2637         // node[0] is going to revoke an old state, thus node[1] should be able to claim both the offered and received HTLC outputs on top of the commitment tx
2638         let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
2639         let (_payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
2640
2641         // Get the will-be-revoked local txn from node[0]
2642         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2643         assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
2644         assert_eq!(revoked_local_txn[0].input.len(), 1);
2645         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
2646         assert_eq!(revoked_local_txn[1].input.len(), 1);
2647         assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2648         assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
2649         check_spends!(revoked_local_txn[1], revoked_local_txn[0]);
2650
2651         // Revoke the old state
2652         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
2653
2654         {
2655                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2656                 check_added_monitors!(nodes[0], 1);
2657                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2658                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2659                 check_added_monitors!(nodes[1], 1);
2660                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2661                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2662                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
2663
2664                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
2665                 assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2666
2667                 assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
2668                 check_spends!(node_txn[0], revoked_local_txn[0]);
2669
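                // Collect the witness-script lengths of the three penalty inputs in a BTreeSet so they
                // come out sorted; the revoked to_local, offered-HTLC and accepted-HTLC scripts all have
                // distinct lengths, letting us confirm each output type was claimed exactly once.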
2670                 let mut witness_lens = BTreeSet::new();
2671                 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
2672                 witness_lens.insert(node_txn[0].input[1].witness.last().unwrap().len());
2673                 witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len());
2674                 assert_eq!(witness_lens.len(), 3);
2675                 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2676                 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2677                 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2678
2679                 // Finally, mine the penalty transaction and check that we get an HTLC failure after
2680                 // ANTI_REORG_DELAY confirmations.
2681                 mine_transaction(&nodes[1], &node_txn[0]);
2682                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2683                 expect_payment_failed!(nodes[1], payment_hash_2, false);
2684         }
2685         get_announce_close_broadcast_events(&nodes, 0, 1);
2686         assert_eq!(nodes[0].node.list_channels().len(), 0);
2687         assert_eq!(nodes[1].node.list_channels().len(), 0);
2688 }
2689
2690 #[test]
2691 fn claim_htlc_outputs_single_tx() {
2692         // Node revoked old state, HTLCs have timed out, claim each of them in a separate justice tx
2693         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2694         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2695         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2696         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2697         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2698
2699         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2700
2701         // Rebalance the network to generate HTLCs in both directions
2702         send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
2703         // node[0] is going to revoke an old state, thus node[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx, but this
2704         // time as two different claim transactions, as we're going to time out the HTLCs given a high current height
2705         let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
2706         let (_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
2707
2708         // Get the will-be-revoked local txn from node[0]
2709         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2710
2711         // Revoke the old state
2712         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
2713
2714         {
2715                 confirm_transaction_at(&nodes[0], &revoked_local_txn[0], 100);
2716                 check_added_monitors!(nodes[0], 1);
2717                 confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
2718                 check_added_monitors!(nodes[1], 1);
2719                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2720                 let mut events = nodes[0].node.get_and_clear_pending_events();
2721                 expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
2722                 match events.last().unwrap() {
2723                         Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
2724                         _ => panic!("Unexpected event"),
2725                 }
2726
2727                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2728                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
2729
2730                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcast();
2731
2732                 // Check the pair of local commitment and HTLC-timeout transactions broadcast due to HTLC expiration
2733                 assert_eq!(node_txn[0].input.len(), 1);
2734                 check_spends!(node_txn[0], chan_1.3);
2735                 assert_eq!(node_txn[1].input.len(), 1);
2736                 let witness_script = node_txn[1].input[0].witness.last().unwrap();
2737                 assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); //Spending an offered htlc output
2738                 check_spends!(node_txn[1], node_txn[0]);
2739
2740                 // Filter out any non-justice transactions.
2741                 node_txn.retain(|tx| tx.input[0].previous_output.txid == revoked_local_txn[0].txid());
2742                 assert!(node_txn.len() > 3);
2743
2744                 assert_eq!(node_txn[0].input.len(), 1);
2745                 assert_eq!(node_txn[1].input.len(), 1);
2746                 assert_eq!(node_txn[2].input.len(), 1);
2747
2748                 check_spends!(node_txn[0], revoked_local_txn[0]);
2749                 check_spends!(node_txn[1], revoked_local_txn[0]);
2750                 check_spends!(node_txn[2], revoked_local_txn[0]);
2751
2752                 let mut witness_lens = BTreeSet::new();
2753                 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
2754                 witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len());
2755                 witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
2756                 assert_eq!(witness_lens.len(), 3);
2757                 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2758                 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2759                 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2760
2761                 // Finally, mine the penalty transactions and check that we get an HTLC failure after
2762                 // ANTI_REORG_DELAY confirmations.
2763                 mine_transaction(&nodes[1], &node_txn[0]);
2764                 mine_transaction(&nodes[1], &node_txn[1]);
2765                 mine_transaction(&nodes[1], &node_txn[2]);
2766                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2767                 expect_payment_failed!(nodes[1], payment_hash_2, false);
2768         }
2769         get_announce_close_broadcast_events(&nodes, 0, 1);
2770         assert_eq!(nodes[0].node.list_channels().len(), 0);
2771         assert_eq!(nodes[1].node.list_channels().len(), 0);
2772 }
2773
2774 #[test]
2775 fn test_htlc_on_chain_success() {
2776         // Test that in case of a unilateral close onchain, we detect the state of the output and pass
2777         // the preimage backward accordingly. So here we test that ChannelManager is
2778         // broadcasting the right event to the other nodes in the payment path.
2779         // We test with two HTLCs simultaneously as that was not handled correctly in the past.
2780         // A --------------------> B ----------------------> C (preimage)
2781         // First, C should claim the HTLC outputs via HTLC-Success when its own latest local
2782         // commitment transaction was broadcast.
2783         // Then, B should learn the preimage from said transactions, attempting to claim backwards
2784         // towards A.
2785         // B should be able to claim via preimage if A then broadcasts its local tx.
2786         // Finally, when A sees B's latest local commitment transaction it should be able to claim
2787         // the HTLC outputs via the preimage it learned (which, once confirmed, should generate a
2788         // PaymentSent event).
2789
2790         let chanmon_cfgs = create_chanmon_cfgs(3);
2791         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2792         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2793         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2794
2795         // Create some initial channels
2796         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2797         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
2798
2799         // Ensure all nodes are at the same height
2800         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
2801         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
2802         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
2803         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
2804
2805         // Rebalance the network a bit by relaying one payment through all the channels...
2806         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2807         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2808
2809         let (our_payment_preimage, payment_hash_1, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
2810         let (our_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
2811
2812         // Broadcast legit commitment tx from C on B's chain
2813         // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain
2814         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
2815         assert_eq!(commitment_tx.len(), 1);
2816         check_spends!(commitment_tx[0], chan_2.3);
2817         nodes[2].node.claim_funds(our_payment_preimage);
2818         expect_payment_claimed!(nodes[2], payment_hash_1, 3_000_000);
2819         nodes[2].node.claim_funds(our_payment_preimage_2);
2820         expect_payment_claimed!(nodes[2], payment_hash_2, 3_000_000);
2821         check_added_monitors!(nodes[2], 2);
2822         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2823         assert!(updates.update_add_htlcs.is_empty());
2824         assert!(updates.update_fail_htlcs.is_empty());
2825         assert!(updates.update_fail_malformed_htlcs.is_empty());
2826         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2827
2828         mine_transaction(&nodes[2], &commitment_tx[0]);
2829         check_closed_broadcast!(nodes[2], true);
2830         check_added_monitors!(nodes[2], 1);
2831         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2832         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 2 (2 * HTLC-Success tx)
2833         assert_eq!(node_txn.len(), 2);
2834         check_spends!(node_txn[0], commitment_tx[0]);
2835         check_spends!(node_txn[1], commitment_tx[0]);
2836         assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2837         assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2838         assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2839         assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
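        // HTLC-Success transactions carry no locktime, unlike the HTLC-Timeout claims checked later.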
2840         assert_eq!(node_txn[0].lock_time.0, 0);
2841         assert_eq!(node_txn[1].lock_time.0, 0);
2842
2843         // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
2844         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()]));
2845         connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
2846         {
2847                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2848                 assert_eq!(added_monitors.len(), 1);
2849                 assert_eq!(added_monitors[0].0.txid, chan_2.3.txid());
2850                 added_monitors.clear();
2851         }
2852         let forwarded_events = nodes[1].node.get_and_clear_pending_events();
2853         assert_eq!(forwarded_events.len(), 3);
2854         match forwarded_events[0] {
2855                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
2856                 _ => panic!("Unexpected event"),
2857         }
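        // Both forwarded HTLCs came in to nodes[1] over chan_1 (from A) and went out over chan_2 (to C).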
2858         let chan_id = Some(chan_1.2);
2859         match forwarded_events[1] {
2860                 Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
2861                         assert_eq!(fee_earned_msat, Some(1000));
2862                         assert_eq!(prev_channel_id, chan_id);
2863                         assert_eq!(claim_from_onchain_tx, true);
2864                         assert_eq!(next_channel_id, Some(chan_2.2));
2865                         assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
2866                 },
2867                 _ => panic!()
2868         }
2869         match forwarded_events[2] {
2870                 Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
2871                         assert_eq!(fee_earned_msat, Some(1000));
2872                         assert_eq!(prev_channel_id, chan_id);
2873                         assert_eq!(claim_from_onchain_tx, true);
2874                         assert_eq!(next_channel_id, Some(chan_2.2));
2875                         assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
2876                 },
2877                 _ => panic!()
2878         }
2879         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
2880         {
2881                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2882                 assert_eq!(added_monitors.len(), 2);
2883                 assert_eq!(added_monitors[0].0.txid, chan_1.3.txid());
2884                 assert_eq!(added_monitors[1].0.txid, chan_1.3.txid());
2885                 added_monitors.clear();
2886         }
2887         assert_eq!(events.len(), 3);
2888
2889         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
2890         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
2891
2892         match nodes_2_event {
2893                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
2894                 _ => panic!("Unexpected event"),
2895         }
2896
2897         match nodes_0_event {
2898                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
2899                         assert!(update_add_htlcs.is_empty());
2900                         assert!(update_fail_htlcs.is_empty());
2901                         assert_eq!(update_fulfill_htlcs.len(), 1);
2902                         assert!(update_fail_malformed_htlcs.is_empty());
2903                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
2904                 },
2905                 _ => panic!("Unexpected event"),
2906         };
2907
2908         // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
2909         match events[0] {
2910                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
2911                 _ => panic!("Unexpected event"),
2912         }
2913
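        // Helper: asserts that $node broadcast exactly two timeout claims of its HTLCs spending
        // $commitment_tx. With $htlc_offered set these are second-stage HTLC-Timeout txs on offered
        // HTLC outputs paying to a revokeable p2wsh output; otherwise they are claims of received
        // HTLCs on the counterparty's commitment paying directly to a p2wpkh output.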
2914         macro_rules! check_tx_local_broadcast {
2915                 ($node: expr, $htlc_offered: expr, $commitment_tx: expr) => { {
2916                         let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap();
2917                         assert_eq!(node_txn.len(), 2);
2918                         // Node[1]: 2 * HTLC-timeout tx
2919                         // Node[0]: 2 * HTLC-timeout tx
2920                         check_spends!(node_txn[0], $commitment_tx);
2921                         check_spends!(node_txn[1], $commitment_tx);
2922                         assert_ne!(node_txn[0].lock_time.0, 0);
2923                         assert_ne!(node_txn[1].lock_time.0, 0);
2924                         if $htlc_offered {
2925                                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2926                                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2927                                 assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2928                                 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2929                         } else {
2930                                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2931                                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2932                                 assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
2933                                 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
2934                         }
2935                         node_txn.clear();
2936                 } }
2937         }
2938         // nodes[1] now broadcasts its own timeout-claim of the output that nodes[2] just claimed via success.
2939         check_tx_local_broadcast!(nodes[1], false, commitment_tx[0]);
2940
2941         // Broadcast legit commitment tx from A on B's chain
2942         // Broadcast preimage tx by B on offered output from A's commitment tx on A's chain
2943         let node_a_commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
2944         check_spends!(node_a_commitment_tx[0], chan_1.3);
2945         mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
2946         check_closed_broadcast!(nodes[1], true);
2947         check_added_monitors!(nodes[1], 1);
2948         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2949         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
2950         assert!(node_txn.len() == 1 || node_txn.len() == 3); // HTLC-Success, 2* RBF bumps of above HTLC txn
2951         let commitment_spend =
2952                 if node_txn.len() == 1 {
2953                         &node_txn[0]
2954                 } else {
2955                         // Certain `ConnectStyle`s will cause RBF bumps of the previous HTLC transaction to be broadcast.
2956                         // FullBlockViaListen
2957                         if node_txn[0].input[0].previous_output.txid == node_a_commitment_tx[0].txid() {
2958                                 check_spends!(node_txn[1], commitment_tx[0]);
2959                                 check_spends!(node_txn[2], commitment_tx[0]);
2960                                 assert_ne!(node_txn[1].input[0].previous_output.vout, node_txn[2].input[0].previous_output.vout);
2961                                 &node_txn[0]
2962                         } else {
2963                                 check_spends!(node_txn[0], commitment_tx[0]);
2964                                 check_spends!(node_txn[1], commitment_tx[0]);
2965                                 assert_ne!(node_txn[0].input[0].previous_output.vout, node_txn[1].input[0].previous_output.vout);
2966                                 &node_txn[2]
2967                         }
2968                 };
2969
2970         check_spends!(commitment_spend, node_a_commitment_tx[0]);
2971         assert_eq!(commitment_spend.input.len(), 2);
2972         assert_eq!(commitment_spend.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2973         assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2974         assert_eq!(commitment_spend.lock_time.0, nodes[1].best_block_info().1);
2975         assert!(commitment_spend.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
2976         // We don't bother to check that B can claim the HTLC output on its commitment tx here as
2977         // we already checked the same situation with A.
2978
2979         // Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
2980         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()]));
2981         connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
2982         check_closed_broadcast!(nodes[0], true);
2983         check_added_monitors!(nodes[0], 1);
2984         let events = nodes[0].node.get_and_clear_pending_events();
2985         assert_eq!(events.len(), 5);
2986         let mut first_claimed = false;
2987         for event in events {
2988                 match event {
2989                         Event::PaymentSent { payment_preimage, payment_hash, .. } => {
2990                                 if payment_preimage == our_payment_preimage && payment_hash == payment_hash_1 {
2991                                         assert!(!first_claimed);
2992                                         first_claimed = true;
2993                                 } else {
2994                                         assert_eq!(payment_preimage, our_payment_preimage_2);
2995                                         assert_eq!(payment_hash, payment_hash_2);
2996                                 }
2997                         },
2998                         Event::PaymentPathSuccessful { .. } => {},
2999                         Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
3000                         _ => panic!("Unexpected event"),
3001                 }
3002         }
3003         check_tx_local_broadcast!(nodes[0], true, node_a_commitment_tx[0]);
3004 }
3005
3006 fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
3007         // Test that in case of a unilateral close onchain, we detect the state of the output and
3008         // time out the HTLC backward accordingly. So here we test that ChannelManager is
3009         // broadcasting the right event to the other nodes in the payment path.
3010         // A ------------------> B ----------------------> C (timeout)
3011         //    B's commitment tx                 C's commitment tx
3012         //            \                                  \
3013         //         B's HTLC timeout tx               B's timeout tx
3014
3015         let chanmon_cfgs = create_chanmon_cfgs(3);
3016         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3017         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3018         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3019         *nodes[0].connect_style.borrow_mut() = connect_style;
3020         *nodes[1].connect_style.borrow_mut() = connect_style;
3021         *nodes[2].connect_style.borrow_mut() = connect_style;
3022
3023         // Create some initial channels
3024         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
3025         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3026
3027         // Rebalance the network a bit by relaying one payment through all the channels...
3028         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
3029         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
3030
3031         let (_payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
3032
3033         // Broadcast legit commitment tx from C on B's chain
3034         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
3035         check_spends!(commitment_tx[0], chan_2.3);
3036         nodes[2].node.fail_htlc_backwards(&payment_hash);
3037         check_added_monitors!(nodes[2], 0);
3038         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
3039         check_added_monitors!(nodes[2], 1);
3040
3041         let events = nodes[2].node.get_and_clear_pending_msg_events();
3042         assert_eq!(events.len(), 1);
3043         match events[0] {
3044                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3045                         assert!(update_add_htlcs.is_empty());
3046                         assert!(!update_fail_htlcs.is_empty());
3047                         assert!(update_fulfill_htlcs.is_empty());
3048                         assert!(update_fail_malformed_htlcs.is_empty());
3049                         assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
3050                 },
3051                 _ => panic!("Unexpected event"),
3052         };
3053         mine_transaction(&nodes[2], &commitment_tx[0]);
3054         check_closed_broadcast!(nodes[2], true);
3055         check_added_monitors!(nodes[2], 1);
3056         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
3057         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
3058         assert_eq!(node_txn.len(), 0);
3059
3060         // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
3061         // Verify that B's ChannelManager is able to detect that the HTLC has timed out via its own tx and fail it backward accordingly
3062         mine_transaction(&nodes[1], &commitment_tx[0]);
3063         check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false
3064                 , [nodes[2].node.get_our_node_id()], 100000);
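        // Confirm blocks until the HTLC expires, prompting nodes[1]'s monitor to broadcast its
        // HTLC-timeout claim against C's commitment transaction.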
3065         connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
3066         let timeout_tx = {
3067                 let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
3068                 if nodes[1].connect_style.borrow().skips_blocks() {
3069                         assert_eq!(txn.len(), 1);
3070                 } else {
3071                         assert_eq!(txn.len(), 3); // Two extra fee bumps for timeout transaction
3072                 }
3073                 txn.iter().for_each(|tx| check_spends!(tx, commitment_tx[0]));
3074                 assert_eq!(txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3075                 txn.remove(0)
3076         };
3077
3078         mine_transaction(&nodes[1], &timeout_tx);
3079         check_added_monitors!(nodes[1], 1);
3080         check_closed_broadcast!(nodes[1], true);
3081
3082         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3083
3084         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
3085         check_added_monitors!(nodes[1], 1);
3086         let events = nodes[1].node.get_and_clear_pending_msg_events();
3087         assert_eq!(events.len(), 1);
3088         match events[0] {
3089                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3090                         assert!(update_add_htlcs.is_empty());
3091                         assert!(!update_fail_htlcs.is_empty());
3092                         assert!(update_fulfill_htlcs.is_empty());
3093                         assert!(update_fail_malformed_htlcs.is_empty());
3094                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3095                 },
3096                 _ => panic!("Unexpected event"),
3097         };
3098
3099         // Broadcast legit commitment tx from B on A's chain
3100         let commitment_tx = get_local_commitment_txn!(nodes[1], chan_1.2);
3101         check_spends!(commitment_tx[0], chan_1.3);
3102
3103         mine_transaction(&nodes[0], &commitment_tx[0]);
3104         connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
3105
3106         check_closed_broadcast!(nodes[0], true);
3107         check_added_monitors!(nodes[0], 1);
3108         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
3109         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx
3110         assert_eq!(node_txn.len(), 1);
3111         check_spends!(node_txn[0], commitment_tx[0]);
3112         assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3113 }
3114
3115 #[test]
3116 fn test_htlc_on_chain_timeout() {
3117         do_test_htlc_on_chain_timeout(ConnectStyle::BestBlockFirstSkippingBlocks);
3118         do_test_htlc_on_chain_timeout(ConnectStyle::TransactionsFirstSkippingBlocks);
3119         do_test_htlc_on_chain_timeout(ConnectStyle::FullBlockViaListen);
3120 }
3121
3122 #[test]
3123 fn test_simple_commitment_revoked_fail_backward() {
3124         // Test that in case of a revoked commitment tx, we detect the resolution of the output by the justice tx
3125         // and fail the HTLC backward accordingly.
3126
3127         let chanmon_cfgs = create_chanmon_cfgs(3);
3128         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3129         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3130         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3131
3132         // Create some initial channels
3133         create_announced_chan_between_nodes(&nodes, 0, 1);
3134         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3135
3136         let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3137         // Get the will-be-revoked local txn from nodes[2]
3138         let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3139         // Revoke the old state
3140         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3141
3142         let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3143
3144         mine_transaction(&nodes[1], &revoked_local_txn[0]);
3145         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
3146         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3147         check_added_monitors!(nodes[1], 1);
3148         check_closed_broadcast!(nodes[1], true);
3149
3150         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
3151         check_added_monitors!(nodes[1], 1);
3152         let events = nodes[1].node.get_and_clear_pending_msg_events();
3153         assert_eq!(events.len(), 1);
3154         match events[0] {
3155                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3156                         assert!(update_add_htlcs.is_empty());
3157                         assert_eq!(update_fail_htlcs.len(), 1);
3158                         assert!(update_fulfill_htlcs.is_empty());
3159                         assert!(update_fail_malformed_htlcs.is_empty());
3160                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3161
3162                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3163                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3164                         expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_2.0.contents.short_channel_id, true);
3165                 },
3166                 _ => panic!("Unexpected event"),
3167         }
3168 }
3169
3170 fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use_dust: bool, no_to_remote: bool) {
3171         // Test that if our counterparty broadcasts a revoked commitment transaction we fail all
3172         // pending HTLCs on that channel backwards even if the HTLCs aren't present in our latest
3173         // commitment transaction anymore.
3174         // To do this, we have the peer which will broadcast a revoked commitment transaction send
3175         // a number of update_fail/commitment_signed updates without ever sending the RAA in
3176         // response to our commitment_signed. This is somewhat misbehavior-y, though not
3177         // technically disallowed and we should probably handle it reasonably.
3178         // Note that this is pretty exhaustive as an outbound HTLC which we haven't yet
3179         // failed/fulfilled backwards must be in at least one of the latest two remote commitment
3180         // transactions:
3181         // * Once we move it out of our holding cell/add it, we will immediately include it in a
3182         //   commitment_signed (implying it will be in the latest remote commitment transaction).
3183         // * Once they remove it, we will send a (the first) commitment_signed without the HTLC,
3184         //   and once they revoke the previous commitment transaction (allowing us to send a new
3185         //   commitment_signed) we will be free to fail/fulfill the HTLC backwards.
3186         let chanmon_cfgs = create_chanmon_cfgs(3);
3187         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3188         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3189         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3190
3191         // Create some initial channels
3192         create_announced_chan_between_nodes(&nodes, 0, 1);
3193         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3194
3195         let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
3196         // Get the will-be-revoked local txn from nodes[2]
3197         let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3198         assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 });
3199         // Revoke the old state
3200         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3201
3202         let value = if use_dust {
3203                 // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
3204                 // well, so HTLCs at exactly the dust limit will not be included in commitment txn.
3205                 nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id())
3206                         .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context.holder_dust_limit_satoshis * 1000
3207         } else { 3000000 };
3208
3209         let (_, first_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3210         let (_, second_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3211         let (_, third_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3212
3213         nodes[2].node.fail_htlc_backwards(&first_payment_hash);
3214         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
3215         check_added_monitors!(nodes[2], 1);
3216         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3217         assert!(updates.update_add_htlcs.is_empty());
3218         assert!(updates.update_fulfill_htlcs.is_empty());
3219         assert!(updates.update_fail_malformed_htlcs.is_empty());
3220         assert_eq!(updates.update_fail_htlcs.len(), 1);
3221         assert!(updates.update_fee.is_none());
3222         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3223         let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
3224         // Drop the last RAA from 3 -> 2; we'll deliver it to nodes[1] later iff deliver_bs_raa is set.
3225
3226         nodes[2].node.fail_htlc_backwards(&second_payment_hash);
3227         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: second_payment_hash }]);
3228         check_added_monitors!(nodes[2], 1);
3229         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3230         assert!(updates.update_add_htlcs.is_empty());
3231         assert!(updates.update_fulfill_htlcs.is_empty());
3232         assert!(updates.update_fail_malformed_htlcs.is_empty());
3233         assert_eq!(updates.update_fail_htlcs.len(), 1);
3234         assert!(updates.update_fee.is_none());
3235         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3236         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3237         check_added_monitors!(nodes[1], 1);
3238         // Note that nodes[1] is in AwaitingRAA, so won't send a CS
3239         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3240         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3241         check_added_monitors!(nodes[2], 1);
3242
3243         nodes[2].node.fail_htlc_backwards(&third_payment_hash);
3244         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: third_payment_hash }]);
3245         check_added_monitors!(nodes[2], 1);
3246         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3247         assert!(updates.update_add_htlcs.is_empty());
3248         assert!(updates.update_fulfill_htlcs.is_empty());
3249         assert!(updates.update_fail_malformed_htlcs.is_empty());
3250         assert_eq!(updates.update_fail_htlcs.len(), 1);
3251         assert!(updates.update_fee.is_none());
3252         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3253         // At this point first_payment_hash has dropped out of the latest two commitment
3254         // transactions that nodes[1] is tracking...
3255         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3256         check_added_monitors!(nodes[1], 1);
3257         // Note that nodes[1] is (still) in AwaitingRAA, so won't send a CS
3258         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3259         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3260         check_added_monitors!(nodes[2], 1);
3261
3262         // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting
3263         // on nodes[2]'s RAA.
3264         let (route, fourth_payment_hash, _, fourth_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 1000000);
3265         nodes[1].node.send_payment_with_route(&route, fourth_payment_hash,
3266                 RecipientOnionFields::secret_only(fourth_payment_secret), PaymentId(fourth_payment_hash.0)).unwrap();
3267         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3268         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3269         check_added_monitors!(nodes[1], 0);
3270
3271         if deliver_bs_raa {
3272                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_raa);
3273                 // One monitor for the new revocation preimage, no second one as we won't generate a new
3274                 // commitment transaction for nodes[0] until process_pending_htlc_forwards().
3275                 check_added_monitors!(nodes[1], 1);
3276                 let events = nodes[1].node.get_and_clear_pending_events();
3277                 assert_eq!(events.len(), 2);
3278                 match events[0] {
3279                         Event::PendingHTLCsForwardable { .. } => { },
3280                         _ => panic!("Unexpected event"),
3281                 };
3282                 match events[1] {
3283                         Event::HTLCHandlingFailed { .. } => { },
3284                         _ => panic!("Unexpected event"),
3285                 }
3286                 // Deliberately don't process the pending fail-back so they all fail back at once after
3287                 // block connection just like the !deliver_bs_raa case
3288         }
3289
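        // Track the payment hashes failed back to nodes[0] so we can check, independent of event
        // ordering, that all three HTLCs were failed.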
3290         let mut failed_htlcs = HashSet::new();
3291         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3292
3293         mine_transaction(&nodes[1], &revoked_local_txn[0]);
3294         check_added_monitors!(nodes[1], 1);
3295         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3296
3297         let events = nodes[1].node.get_and_clear_pending_events();
3298         assert_eq!(events.len(), if deliver_bs_raa { 3 + nodes.len() - 1 } else { 4 + nodes.len() });
3299         match events[0] {
3300                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
3301                 _ => panic!("Unexpected event"),
3302         }
3303         match events[1] {
3304                 Event::PaymentPathFailed { ref payment_hash, .. } => {
3305                         assert_eq!(*payment_hash, fourth_payment_hash);
3306                 },
3307                 _ => panic!("Unexpected event"),
3308         }
3309         match events[2] {
3310                 Event::PaymentFailed { ref payment_hash, .. } => {
3311                         assert_eq!(*payment_hash, fourth_payment_hash);
3312                 },
3313                 _ => panic!("Unexpected event"),
3314         }
3315
3316         nodes[1].node.process_pending_htlc_forwards();
3317         check_added_monitors!(nodes[1], 1);
3318
3319         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
3320         assert_eq!(events.len(), if deliver_bs_raa { 4 } else { 3 });
3321
3322         if deliver_bs_raa {
3323                 let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
3324                 match nodes_2_event {
3325                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3326                                 assert_eq!(nodes[2].node.get_our_node_id(), *node_id);
3327                                 assert_eq!(update_add_htlcs.len(), 1);
3328                                 assert!(update_fulfill_htlcs.is_empty());
3329                                 assert!(update_fail_htlcs.is_empty());
3330                                 assert!(update_fail_malformed_htlcs.is_empty());
3331                         },
3332                         _ => panic!("Unexpected event"),
3333                 }
3334         }
3335
3336         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
3337         match nodes_2_event {
3338                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, ref data } }, node_id: _ } => {
3339                         assert_eq!(channel_id, chan_2.2);
3340                         assert_eq!(data.as_str(), "Channel closed because commitment or closing transaction was confirmed on chain.");
3341                 },
3342                 _ => panic!("Unexpected event"),
3343         }
3344
3345         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
3346         match nodes_0_event {
3347                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3348                         assert!(update_add_htlcs.is_empty());
3349                         assert_eq!(update_fail_htlcs.len(), 3);
3350                         assert!(update_fulfill_htlcs.is_empty());
3351                         assert!(update_fail_malformed_htlcs.is_empty());
3352                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3353
3354                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3355                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[1]);
3356                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[2]);
3357
3358                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3359
3360                         let events = nodes[0].node.get_and_clear_pending_events();
3361                         assert_eq!(events.len(), 6);
3362                         match events[0] {
3363                                 Event::PaymentPathFailed { ref payment_hash, ref failure, .. } => {
3364                                         assert!(failed_htlcs.insert(payment_hash.0));
3365                                         // If we delivered B's RAA we got an unknown preimage error, not something
3366                                         // that we should update our routing table for.
3367                                         if !deliver_bs_raa {
3368                                                 if let PathFailure::OnPath { network_update: Some(_) } = failure { } else { panic!("Unexpected path failure") }
3369                                         }
3370                                 },
3371                                 _ => panic!("Unexpected event"),
3372                         }
3373                         match events[1] {
3374                                 Event::PaymentFailed { ref payment_hash, .. } => {
3375                                         assert_eq!(*payment_hash, first_payment_hash);
3376                                 },
3377                                 _ => panic!("Unexpected event"),
3378                         }
3379                         match events[2] {
3380                                 Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
3381                                         assert!(failed_htlcs.insert(payment_hash.0));
3382                                 },
3383                                 _ => panic!("Unexpected event"),
3384                         }
3385                         match events[3] {
3386                                 Event::PaymentFailed { ref payment_hash, .. } => {
3387                                         assert_eq!(*payment_hash, second_payment_hash);
3388                                 },
3389                                 _ => panic!("Unexpected event"),
3390                         }
3391                         match events[4] {
3392                                 Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
3393                                         assert!(failed_htlcs.insert(payment_hash.0));
3394                                 },
3395                                 _ => panic!("Unexpected event"),
3396                         }
3397                         match events[5] {
3398                                 Event::PaymentFailed { ref payment_hash, .. } => {
3399                                         assert_eq!(*payment_hash, third_payment_hash);
3400                                 },
3401                                 _ => panic!("Unexpected event"),
3402                         }
3403                 },
3404                 _ => panic!("Unexpected event"),
3405         }
3406
3407         // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
3408         match events[0] {
3409                 MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
3410                 _ => panic!("Unexpected event"),
3411         }
3412
3413         assert!(failed_htlcs.contains(&first_payment_hash.0));
3414         assert!(failed_htlcs.contains(&second_payment_hash.0));
3415         assert!(failed_htlcs.contains(&third_payment_hash.0));
3416 }
3417
3418 #[test]
3419 fn test_commitment_revoked_fail_backward_exhaustive_a() {
3420         do_test_commitment_revoked_fail_backward_exhaustive(false, true, false);
3421         do_test_commitment_revoked_fail_backward_exhaustive(true, true, false);
3422         do_test_commitment_revoked_fail_backward_exhaustive(false, false, false);
3423         do_test_commitment_revoked_fail_backward_exhaustive(true, false, false);
3424 }
3425
3426 #[test]
3427 fn test_commitment_revoked_fail_backward_exhaustive_b() {
3428         do_test_commitment_revoked_fail_backward_exhaustive(false, true, true);
3429         do_test_commitment_revoked_fail_backward_exhaustive(true, true, true);
3430         do_test_commitment_revoked_fail_backward_exhaustive(false, false, true);
3431         do_test_commitment_revoked_fail_backward_exhaustive(true, false, true);
3432 }
3433
3434 #[test]
3435 fn fail_backward_pending_htlc_upon_channel_failure() {
3436         let chanmon_cfgs = create_chanmon_cfgs(2);
3437         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3438         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3439         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3440         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
3441
3442         // Alice -> Bob: Route a payment but without Bob sending revoke_and_ack.
3443         {
3444                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000);
3445                 nodes[0].node.send_payment_with_route(&route, payment_hash, RecipientOnionFields::secret_only(payment_secret),
3446                         PaymentId(payment_hash.0)).unwrap();
3447                 check_added_monitors!(nodes[0], 1);
3448
3449                 let payment_event = {
3450                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3451                         assert_eq!(events.len(), 1);
3452                         SendEvent::from_event(events.remove(0))
3453                 };
3454                 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
3455                 assert_eq!(payment_event.msgs.len(), 1);
3456         }
3457
3458         // Alice -> Bob: Route another payment but now Alice waits for Bob's earlier revoke_and_ack.
3459         let (route, failed_payment_hash, _, failed_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000);
3460         {
3461                 nodes[0].node.send_payment_with_route(&route, failed_payment_hash,
3462                         RecipientOnionFields::secret_only(failed_payment_secret), PaymentId(failed_payment_hash.0)).unwrap();
3463                 check_added_monitors!(nodes[0], 0);
3464
3465                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3466         }
3467
3468         // Alice <- Bob: Send a malformed update_add_htlc so Alice fails the channel.
3469         {
3470                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 50_000);
3471
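                // Hand-roll the onion so we can deliver a raw update_add_htlc directly; the normal send
                // path would never produce the invalid (0-msat) HTLC we want Alice to reject below.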
3472                 let secp_ctx = Secp256k1::new();
3473                 let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
3474                 let current_height = nodes[1].node.best_block.read().unwrap().height() + 1;
3475                 let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(
3476                         &route.paths[0], 50_000, RecipientOnionFields::secret_only(payment_secret), current_height, &None).unwrap();
3477                 let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
3478                 let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
3479
3480                 // Send a 0-msat update_add_htlc to fail the channel.
3481                 let update_add_htlc = msgs::UpdateAddHTLC {
3482                         channel_id: chan.2,
3483                         htlc_id: 0,
3484                         amount_msat: 0,
3485                         payment_hash,
3486                         cltv_expiry,
3487                         onion_routing_packet,
3488                         skimmed_fee_msat: None,
3489                 };
3490                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
3491         }
3492         let events = nodes[0].node.get_and_clear_pending_events();
3493         assert_eq!(events.len(), 3);
3494         // Check that Alice fails backward the pending HTLC from the second payment.
3495         match events[0] {
3496                 Event::PaymentPathFailed { payment_hash, .. } => {
3497                         assert_eq!(payment_hash, failed_payment_hash);
3498                 },
3499                 _ => panic!("Unexpected event"),
3500         }
3501         match events[1] {
3502                 Event::PaymentFailed { payment_hash, .. } => {
3503                         assert_eq!(payment_hash, failed_payment_hash);
3504                 },
3505                 _ => panic!("Unexpected event"),
3506         }
3507         match events[2] {
3508                 Event::ChannelClosed { reason: ClosureReason::ProcessingError { ref err }, .. } => {
3509                         assert_eq!(err, "Remote side tried to send a 0-msat HTLC");
3510                 },
3511                 _ => panic!("Unexpected event {:?}", events[2]),
3512         }
3513         check_closed_broadcast!(nodes[0], true);
3514         check_added_monitors!(nodes[0], 1);
3515 }
3516
3517 #[test]
3518 fn test_htlc_ignore_latest_remote_commitment() {
3519         // Test that HTLC transactions spending the latest remote commitment transaction are simply
3520         // ignored if we cannot claim them. This originally tickled an invalid unwrap().
3521         let chanmon_cfgs = create_chanmon_cfgs(2);
3522         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3523         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3524         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3525         if *nodes[1].connect_style.borrow() == ConnectStyle::FullBlockViaListen {
3526                 // We rely on the ability to connect a block redundantly, which isn't allowed via
3527                 // `chain::Listen`, so we never run the test if we randomly get assigned that
3528                 // connect_style.
3529                 return;
3530         }
3531         create_announced_chan_between_nodes(&nodes, 0, 1);
3532
3533         route_payment(&nodes[0], &[&nodes[1]], 10000000);
3534         nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
3535         connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
3536         check_closed_broadcast!(nodes[0], true);
3537         check_added_monitors!(nodes[0], 1);
3538         check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
3539
3540         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
3541         assert_eq!(node_txn.len(), 3);
3542         assert_eq!(node_txn[0].txid(), node_txn[1].txid());
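	// node_txn[0] and node_txn[1] are the same commitment transaction broadcast twice (once on
	// force-close and presumably again by the ChannelMonitor as the HTLC times out); the third
	// transaction should be nodes[0]'s HTLC-timeout claim.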
3543
3544         let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[1].clone()]);
3545         connect_block(&nodes[1], &block);
3546         check_closed_broadcast!(nodes[1], true);
3547         check_added_monitors!(nodes[1], 1);
3548         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
3549
3550         // Duplicate the connect_block call since this may happen due to other listeners
3551         // registering new transactions
3552         connect_block(&nodes[1], &block);
3553 }
3554
3555 #[test]
3556 fn test_force_close_fail_back() {
3557         // Check which HTLCs are failed-backwards on channel force-closure
3558         let chanmon_cfgs = create_chanmon_cfgs(3);
3559         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3560         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3561         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3562         create_announced_chan_between_nodes(&nodes, 0, 1);
3563         create_announced_chan_between_nodes(&nodes, 1, 2);
3564
3565         let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
3566
3567         let mut payment_event = {
3568                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
3569                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
3570                 check_added_monitors!(nodes[0], 1);
3571
3572                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3573                 assert_eq!(events.len(), 1);
3574                 SendEvent::from_event(events.remove(0))
3575         };
3576
3577         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3578         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
3579
3580         expect_pending_htlcs_forwardable!(nodes[1]);
3581
3582         let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
3583         assert_eq!(events_2.len(), 1);
3584         payment_event = SendEvent::from_event(events_2.remove(0));
3585         assert_eq!(payment_event.msgs.len(), 1);
3586
3587         check_added_monitors!(nodes[1], 1);
3588         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
3589         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
3590         check_added_monitors!(nodes[2], 1);
3591         let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3592
3593         // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
3594         // state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
3595         // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
3596
3597         nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
3598         check_closed_broadcast!(nodes[2], true);
3599         check_added_monitors!(nodes[2], 1);
3600         check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
3601         let tx = {
3602                 let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
3603                 // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
3604                 // have a use for it unless nodes[2] learns the preimage somehow; otherwise the funds
3605                 // will go back to nodes[1] upon timeout.
3606                 assert_eq!(node_txn.len(), 1);
3607                 node_txn.remove(0)
3608         };
3609
3610         mine_transaction(&nodes[1], &tx);
3611
3612         // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
3613         check_closed_broadcast!(nodes[1], true);
3614         check_added_monitors!(nodes[1], 1);
3615         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
3616
3617         // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
3618         {
3619                 get_monitor!(nodes[2], payment_event.commitment_msg.channel_id)
3620                         .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger);
3621         }
3622         mine_transaction(&nodes[2], &tx);
3623         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
3624         assert_eq!(node_txn.len(), 1);
3625         assert_eq!(node_txn[0].input.len(), 1);
3626         assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
3627         assert_eq!(node_txn[0].lock_time.0, 0); // Must be an HTLC-Success
3628         assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
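	// (Second-stage HTLC transactions carry a 5-element witness; an HTLC-Success additionally has a
	// locktime of 0, whereas an HTLC-Timeout sets its locktime to the HTLC's cltv_expiry.)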
3629
3630         check_spends!(node_txn[0], tx);
3631 }
3632
3633 #[test]
3634 fn test_dup_events_on_peer_disconnect() {
3635         // Test that if we receive a duplicative update_fulfill_htlc message after a reconnect we do
3636         // not generate a corresponding duplicative PaymentSent event. This was not always the case,
3637         // as we used to generate the event immediately upon receipt of the payment preimage in the
3638         // update_fulfill_htlc message.
3639
3640         let chanmon_cfgs = create_chanmon_cfgs(2);
3641         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3642         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3643         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3644         create_announced_chan_between_nodes(&nodes, 0, 1);
3645
3646         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
3647
3648         nodes[1].node.claim_funds(payment_preimage);
3649         expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
3650         check_added_monitors!(nodes[1], 1);
3651         let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3652         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
3653         expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
3654
3655         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3656         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3657
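	// On reconnect nodes[1] retransmits the update_fulfill_htlc, but nodes[0] must not generate a
	// second PaymentSent; only the PaymentPathSuccessful event below is expected.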
3658         let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3659         reconnect_args.pending_htlc_claims.0 = 1;
3660         reconnect_nodes(reconnect_args);
3661         expect_payment_path_successful!(nodes[0]);
3662 }
3663
3664 #[test]
3665 fn test_peer_disconnected_before_funding_broadcasted() {
3666         // Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
3667         // before the funding transaction has been broadcasted.
3668         let chanmon_cfgs = create_chanmon_cfgs(2);
3669         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3670         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3671         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3672
3673         // Open a channel between `nodes[0]` and `nodes[1]`, for which the funding transaction is never
3674         // broadcasted, even though it's created by `nodes[0]`.
3675         let expected_temporary_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
3676         let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
3677         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
3678         let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
3679         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
3680
3681         let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
3682         assert_eq!(temporary_channel_id, expected_temporary_channel_id);
3683
3684         assert!(nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
3685
3686         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
3687         assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id);
3688
3689         // Even though the funding transaction is created by `nodes[0]`, the `FundingCreated` msg is
3690         // never sent to `nodes[1]`, and therefore the tx is never signed by either party nor
3691         // broadcasted.
3692         {
3693                 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
3694         }
3695
3696         // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` when the peers are
3697         // disconnected before the funding transaction was broadcasted.
3698         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3699         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3700
3701         check_closed_event!(&nodes[0], 1, ClosureReason::DisconnectedPeer, false
3702                 , [nodes[1].node.get_our_node_id()], 1000000);
3703         check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false
3704                 , [nodes[0].node.get_our_node_id()], 1000000);
3705 }
3706
3707 #[test]
3708 fn test_simple_peer_disconnect() {
3709         // Test that we can reconnect when there are no lost messages
3710         let chanmon_cfgs = create_chanmon_cfgs(3);
3711         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3712         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3713         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3714         create_announced_chan_between_nodes(&nodes, 0, 1);
3715         create_announced_chan_between_nodes(&nodes, 1, 2);
3716
3717         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3718         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3719         let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3720         reconnect_args.send_channel_ready = (true, true);
3721         reconnect_nodes(reconnect_args);
3722
3723         let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3724         let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3725         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
3726         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);
3727
3728         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3729         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3730         reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3731
3732         let (payment_preimage_3, payment_hash_3, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
3733         let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3734         let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3735         let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3736
3737         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3738         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3739
3740         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
3741         fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);
3742
3743         let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3744         reconnect_args.pending_cell_htlc_fails.0 = 1;
3745         reconnect_args.pending_cell_htlc_claims.0 = 1;
3746         reconnect_nodes(reconnect_args);
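	// The reconnect above redelivers the held claim and fail, so nodes[0] should now see a
	// PaymentSent/PaymentPathSuccessful pair for the third payment and a
	// PaymentPathFailed/PaymentFailed pair for the fifth.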
3747         {
3748                 let events = nodes[0].node.get_and_clear_pending_events();
3749                 assert_eq!(events.len(), 4);
3750                 match events[0] {
3751                         Event::PaymentSent { payment_preimage, payment_hash, .. } => {
3752                                 assert_eq!(payment_preimage, payment_preimage_3);
3753                                 assert_eq!(payment_hash, payment_hash_3);
3754                         },
3755                         _ => panic!("Unexpected event"),
3756                 }
3757                 match events[1] {
3758                         Event::PaymentPathSuccessful { .. } => {},
3759                         _ => panic!("Unexpected event"),
3760                 }
3761                 match events[2] {
3762                         Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } => {
3763                                 assert_eq!(payment_hash, payment_hash_5);
3764                                 assert!(payment_failed_permanently);
3765                         },
3766                         _ => panic!("Unexpected event"),
3767                 }
3768                 match events[3] {
3769                         Event::PaymentFailed { payment_hash, .. } => {
3770                                 assert_eq!(payment_hash, payment_hash_5);
3771                         },
3772                         _ => panic!("Unexpected event"),
3773                 }
3774         }
3775         check_added_monitors(&nodes[0], 1);
3776
3777         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
3778         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
3779 }
3780
3781 fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken_lnd: bool) {
3782         // Test that we can reconnect when in-flight HTLC updates get dropped
3783         let chanmon_cfgs = create_chanmon_cfgs(2);
3784         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3785         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3786         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3787
3788         let mut as_channel_ready = None;
3789         let channel_id = if messages_delivered == 0 {
3790                 let (channel_ready, chan_id, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001);
3791                 as_channel_ready = Some(channel_ready);
3792                 // nodes[1] doesn't receive the channel_ready message (it'll be re-sent on reconnect)
3793                 // Note that we store it so that if we're running with `simulate_broken_lnd` we can deliver
3794                 // it before the channel_reestablish message.
3795                 chan_id
3796         } else {
3797                 create_announced_chan_between_nodes(&nodes, 0, 1).2
3798         };
3799
3800         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000);
3801
3802         let payment_event = {
3803                 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
3804                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
3805                 check_added_monitors!(nodes[0], 1);
3806
3807                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3808                 assert_eq!(events.len(), 1);
3809                 SendEvent::from_event(events.remove(0))
3810         };
3811         assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);
3812
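	// messages_delivered summary for this (outbound HTLC) half: 0/1 drop the update_add and
	// commitment_signed entirely (0 also withholds the initial channel_ready); 2 delivers the
	// update_add_htlc to nodes[1]; 3 also delivers the commitment_signed; 4 delivers nodes[1]'s
	// revoke_and_ack back to nodes[0]; 5 also delivers nodes[1]'s commitment_signed; 6 delivers
	// nodes[0]'s final revoke_and_ack.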
3813         if messages_delivered < 2 {
3814                 // Drop the payment_event messages, and let them get re-generated in reconnect_nodes!
3815         } else {
3816                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3817                 if messages_delivered >= 3 {
3818                         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
3819                         check_added_monitors!(nodes[1], 1);
3820                         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3821
3822                         if messages_delivered >= 4 {
3823                                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
3824                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3825                                 check_added_monitors!(nodes[0], 1);
3826
3827                                 if messages_delivered >= 5 {
3828                                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
3829                                         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
3830                                         // No commitment_signed so get_event_msg's assert(len == 1) passes
3831                                         check_added_monitors!(nodes[0], 1);
3832
3833                                         if messages_delivered >= 6 {
3834                                                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
3835                                                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3836                                                 check_added_monitors!(nodes[1], 1);
3837                                         }
3838                                 }
3839                         }
3840                 }
3841         }
3842
3843         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3844         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3845         if messages_delivered < 3 {
3846                 if simulate_broken_lnd {
3847                         // lnd has a long-standing bug where they send a channel_ready prior to a
3848                         // channel_reestablish if you reconnect prior to channel_ready time.
3849                         //
3850                         // Here we simulate that behavior, delivering a channel_ready immediately on
3851                         // reconnect. Note that we don't bother skipping the now-duplicate channel_ready sent
3852                         // in `reconnect_nodes` but we currently don't fail based on that.
3853                         //
3854                         // See-also <https://github.com/lightningnetwork/lnd/issues/4006>
3855                         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready.as_ref().unwrap().0);
3856                 }
3857                 // Even if the channel_ready messages get exchanged, as long as nothing further was
3858                 // received on either side, both sides will need to resend them.
3859                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3860                 reconnect_args.send_channel_ready = (true, true);
3861                 reconnect_args.pending_htlc_adds.1 = 1;
3862                 reconnect_nodes(reconnect_args);
3863         } else if messages_delivered == 3 {
3864                 // nodes[0] still wants its RAA + commitment_signed
3865                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3866                 reconnect_args.pending_htlc_adds.0 = -1;
3867                 reconnect_args.pending_raa.0 = true;
3868                 reconnect_nodes(reconnect_args);
3869         } else if messages_delivered == 4 {
3870                 // nodes[0] still wants its commitment_signed
3871                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3872                 reconnect_args.pending_htlc_adds.0 = -1;
3873                 reconnect_nodes(reconnect_args);
3874         } else if messages_delivered == 5 {
3875                 // nodes[1] still wants its final RAA
3876                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3877                 reconnect_args.pending_raa.1 = true;
3878                 reconnect_nodes(reconnect_args);
3879         } else if messages_delivered == 6 {
3880                 // Everything was delivered...
3881                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3882         }
3883
3884         let events_1 = nodes[1].node.get_and_clear_pending_events();
3885         if messages_delivered == 0 {
3886                 assert_eq!(events_1.len(), 2);
3887                 match events_1[0] {
3888                         Event::ChannelReady { .. } => { },
3889                         _ => panic!("Unexpected event"),
3890                 };
3891                 match events_1[1] {
3892                         Event::PendingHTLCsForwardable { .. } => { },
3893                         _ => panic!("Unexpected event"),
3894                 };
3895         } else {
3896                 assert_eq!(events_1.len(), 1);
3897                 match events_1[0] {
3898                         Event::PendingHTLCsForwardable { .. } => { },
3899                         _ => panic!("Unexpected event"),
3900                 };
3901         }
3902
3903         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3904         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3905         reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3906
3907         nodes[1].node.process_pending_htlc_forwards();
3908
3909         let events_2 = nodes[1].node.get_and_clear_pending_events();
3910         assert_eq!(events_2.len(), 1);
3911         match events_2[0] {
3912                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
3913                         assert_eq!(payment_hash_1, *payment_hash);
3914                         assert_eq!(amount_msat, 1_000_000);
3915                         assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
3916                         assert_eq!(via_channel_id, Some(channel_id));
3917                         match &purpose {
3918                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
3919                                         assert!(payment_preimage.is_none());
3920                                         assert_eq!(payment_secret_1, *payment_secret);
3921                                 },
3922                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
3923                         }
3924                 },
3925                 _ => panic!("Unexpected event"),
3926         }
3927
3928         nodes[1].node.claim_funds(payment_preimage_1);
3929         check_added_monitors!(nodes[1], 1);
3930         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
3931
3932         let events_3 = nodes[1].node.get_and_clear_pending_msg_events();
3933         assert_eq!(events_3.len(), 1);
3934         let (update_fulfill_htlc, commitment_signed) = match events_3[0] {
3935                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
3936                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
3937                         assert!(updates.update_add_htlcs.is_empty());
3938                         assert!(updates.update_fail_htlcs.is_empty());
3939                         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
3940                         assert!(updates.update_fail_malformed_htlcs.is_empty());
3941                         assert!(updates.update_fee.is_none());
3942                         (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
3943                 },
3944                 _ => panic!("Unexpected event"),
3945         };
3946
3947         if messages_delivered >= 1 {
3948                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc);
3949
3950                 let events_4 = nodes[0].node.get_and_clear_pending_events();
3951                 assert_eq!(events_4.len(), 1);
3952                 match events_4[0] {
3953                         Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
3954                                 assert_eq!(payment_preimage_1, *payment_preimage);
3955                                 assert_eq!(payment_hash_1, *payment_hash);
3956                         },
3957                         _ => panic!("Unexpected event"),
3958                 }
3959
3960                 if messages_delivered >= 2 {
3961                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
3962                         check_added_monitors!(nodes[0], 1);
3963                         let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
3964
3965                         if messages_delivered >= 3 {
3966                                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
3967                                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3968                                 check_added_monitors!(nodes[1], 1);
3969
3970                                 if messages_delivered >= 4 {
3971                                         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed);
3972                                         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
3973                                         // No commitment_signed so get_event_msg's assert(len == 1) passes
3974                                         check_added_monitors!(nodes[1], 1);
3975
3976                                         if messages_delivered >= 5 {
3977                                                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
3978                                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3979                                                 check_added_monitors!(nodes[0], 1);
3980                                         }
3981                                 }
3982                         }
3983                 }
3984         }
3985
3986         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3987         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3988         if messages_delivered < 2 {
3989                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3990                 reconnect_args.pending_htlc_claims.0 = 1;
3991                 reconnect_nodes(reconnect_args);
3992                 if messages_delivered < 1 {
3993                         expect_payment_sent!(nodes[0], payment_preimage_1);
3994                 } else {
3995                         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3996                 }
3997         } else if messages_delivered == 2 {
3998                 // nodes[1] still wants its RAA + commitment_signed
3999                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4000                 reconnect_args.pending_htlc_adds.1 = -1;
4001                 reconnect_args.pending_raa.1 = true;
4002                 reconnect_nodes(reconnect_args);
4003         } else if messages_delivered == 3 {
4004                 // nodes[1] still wants its commitment_signed
4005                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4006                 reconnect_args.pending_htlc_adds.1 = -1;
4007                 reconnect_nodes(reconnect_args);
4008         } else if messages_delivered == 4 {
4009                 // nodes[0] still wants its final RAA
4010                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4011                 reconnect_args.pending_raa.0 = true;
4012                 reconnect_nodes(reconnect_args);
4013         } else if messages_delivered == 5 {
4014                 // Everything was delivered...
4015                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
4016         }
4017
4018         if messages_delivered == 1 || messages_delivered == 2 {
4019                 expect_payment_path_successful!(nodes[0]);
4020         }
4021         if messages_delivered <= 5 {
4022                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4023                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4024         }
4025         reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
4026
4027         if messages_delivered > 2 {
4028                 expect_payment_path_successful!(nodes[0]);
4029         }
4030
4031         // Channel should still work fine...
4032         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
4033         let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
4034         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
4035 }
4036
4037 #[test]
4038 fn test_drop_messages_peer_disconnect_a() {
4039         do_test_drop_messages_peer_disconnect(0, true);
4040         do_test_drop_messages_peer_disconnect(0, false);
4041         do_test_drop_messages_peer_disconnect(1, false);
4042         do_test_drop_messages_peer_disconnect(2, false);
4043 }
4044
4045 #[test]
4046 fn test_drop_messages_peer_disconnect_b() {
4047         do_test_drop_messages_peer_disconnect(3, false);
4048         do_test_drop_messages_peer_disconnect(4, false);
4049         do_test_drop_messages_peer_disconnect(5, false);
4050         do_test_drop_messages_peer_disconnect(6, false);
4051 }
4052
4053 #[test]
4054 fn test_channel_ready_without_best_block_updated() {
4055         // Previously, if we were offline when a funding transaction was locked in and then came back
4056         // online, calling best_block_updated once followed by transactions_confirmed, we would not
4057         // generate a channel_ready until a later best_block_updated. This tests that we generate the
4058         // channel_ready immediately instead.
4059         let chanmon_cfgs = create_chanmon_cfgs(2);
4060         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4061         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4062         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4063         *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
4064
4065         let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
4066
4067         let conf_height = nodes[0].best_block_info().1 + 1;
4068         connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
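	// With ConnectStyle::BestBlockFirstSkippingBlocks the blocks above only advance the best block;
	// below we report the funding transaction separately via transactions_confirmed, mimicking a
	// node that was offline when the funding transaction confirmed.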
4069         let block_txn = [funding_tx];
4070         let conf_txn: Vec<_> = block_txn.iter().enumerate().collect();
4071         let conf_block_header = nodes[0].get_block_header(conf_height);
4072         nodes[0].node.transactions_confirmed(&conf_block_header, &conf_txn[..], conf_height);
4073
4074         // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
4075         let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
4076         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
4077 }
4078
4079 #[test]
4080 fn test_drop_messages_peer_disconnect_dual_htlc() {
4081         // Test that we can handle reconnecting when both sides of a channel have pending
4082         // commitment_updates when we disconnect.
4083         let chanmon_cfgs = create_chanmon_cfgs(2);
4084         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4085         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4086         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4087         create_announced_chan_between_nodes(&nodes, 0, 1);
4088
4089         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
4090
4091         // Now try to send a second payment which will fail to send
4092         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
4093         nodes[0].node.send_payment_with_route(&route, payment_hash_2,
4094                 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
4095         check_added_monitors!(nodes[0], 1);
4096
4097         let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
4098         assert_eq!(events_1.len(), 1);
4099         match events_1[0] {
4100                 MessageSendEvent::UpdateHTLCs { .. } => {},
4101                 _ => panic!("Unexpected event"),
4102         }
4103
4104         nodes[1].node.claim_funds(payment_preimage_1);
4105         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
4106         check_added_monitors!(nodes[1], 1);
4107
4108         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
4109         assert_eq!(events_2.len(), 1);
4110         match events_2[0] {
4111                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
4112                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
4113                         assert!(update_add_htlcs.is_empty());
4114                         assert_eq!(update_fulfill_htlcs.len(), 1);
4115                         assert!(update_fail_htlcs.is_empty());
4116                         assert!(update_fail_malformed_htlcs.is_empty());
4117                         assert!(update_fee.is_none());
4118
4119                         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
4120                         let events_3 = nodes[0].node.get_and_clear_pending_events();
4121                         assert_eq!(events_3.len(), 1);
4122                         match events_3[0] {
4123                                 Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
4124                                         assert_eq!(*payment_preimage, payment_preimage_1);
4125                                         assert_eq!(*payment_hash, payment_hash_1);
4126                                 },
4127                                 _ => panic!("Unexpected event"),
4128                         }
4129
4130                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
4131                         let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4132                         // No commitment_signed so get_event_msg's assert(len == 1) passes
4133                         check_added_monitors!(nodes[0], 1);
4134                 },
4135                 _ => panic!("Unexpected event"),
4136         }
4137
4138         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4139         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4140
4141         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
4142                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
4143         }, true).unwrap();
4144         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
4145         assert_eq!(reestablish_1.len(), 1);
4146         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
4147                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
4148         }, false).unwrap();
4149         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
4150         assert_eq!(reestablish_2.len(), 1);
4151
4152         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
4153         let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
4154         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
4155         let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
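	// as_resp/bs_resp hold what each side would retransmit in response to the channel_reestablish,
	// roughly (channel_ready, revoke_and_ack, commitment update, RAA/commitment order). nodes[0]
	// must resend its RAA plus its update_add/commitment_signed; nodes[1] has nothing to resend.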
4156
4157         assert!(as_resp.0.is_none());
4158         assert!(bs_resp.0.is_none());
4159
4160         assert!(bs_resp.1.is_none());
4161         assert!(bs_resp.2.is_none());
4162
4163         assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
4164
4165         assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1);
4166         assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty());
4167         assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
4168         assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
4169         assert!(as_resp.2.as_ref().unwrap().update_fee.is_none());
4170         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]);
4171         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed);
4172         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4173         // No commitment_signed so get_event_msg's assert(len == 1) passes
4174         check_added_monitors!(nodes[1], 1);
4175
4176         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap());
4177         let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4178         assert!(bs_second_commitment_signed.update_add_htlcs.is_empty());
4179         assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty());
4180         assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty());
4181         assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty());
4182         assert!(bs_second_commitment_signed.update_fee.is_none());
4183         check_added_monitors!(nodes[1], 1);
4184
4185         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
4186         let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4187         assert!(as_commitment_signed.update_add_htlcs.is_empty());
4188         assert!(as_commitment_signed.update_fulfill_htlcs.is_empty());
4189         assert!(as_commitment_signed.update_fail_htlcs.is_empty());
4190         assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty());
4191         assert!(as_commitment_signed.update_fee.is_none());
4192         check_added_monitors!(nodes[0], 1);
4193
4194         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed);
4195         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4196         // No commitment_signed so get_event_msg's assert(len == 1) passes
4197         check_added_monitors!(nodes[0], 1);
4198
4199         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed);
4200         let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4201         // No commitment_signed so get_event_msg's assert(len == 1) passes
4202         check_added_monitors!(nodes[1], 1);
4203
4204         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
4205         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4206         check_added_monitors!(nodes[1], 1);
4207
4208         expect_pending_htlcs_forwardable!(nodes[1]);
4209
4210         let events_5 = nodes[1].node.get_and_clear_pending_events();
4211         assert_eq!(events_5.len(), 1);
4212         match events_5[0] {
4213                 Event::PaymentClaimable { ref payment_hash, ref purpose, .. } => {
4214                         assert_eq!(payment_hash_2, *payment_hash);
4215                         match &purpose {
4216                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
4217                                         assert!(payment_preimage.is_none());
4218                                         assert_eq!(payment_secret_2, *payment_secret);
4219                                 },
4220                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
4221                         }
4222                 },
4223                 _ => panic!("Unexpected event"),
4224         }
4225
4226         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
4227         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4228         check_added_monitors!(nodes[0], 1);
4229
4230         expect_payment_path_successful!(nodes[0]);
4231         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
4232 }
4233
4234 fn do_test_htlc_timeout(send_partial_mpp: bool) {
4235         // If the user fails to claim/fail an HTLC within the HTLC CLTV timeout we fail it for them
4236         // to avoid our counterparty failing the channel.
4237         let chanmon_cfgs = create_chanmon_cfgs(2);
4238         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4239         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4240         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4241
4242         create_announced_chan_between_nodes(&nodes, 0, 1);
4243
4244         let our_payment_hash = if send_partial_mpp {
4245                 let (route, our_payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000);
4246                 // Use the utility function send_payment_along_path to send the payment with MPP data which
4247                 // indicates there are more HTLCs coming.
4248                 let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
4249                 let payment_id = PaymentId([42; 32]);
4250                 let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
4251                         RecipientOnionFields::secret_only(payment_secret), payment_id, &route).unwrap();
4252                 nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash,
4253                         RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id,
4254                         &None, session_privs[0]).unwrap();
4255                 check_added_monitors!(nodes[0], 1);
4256                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
4257                 assert_eq!(events.len(), 1);
4258                 // Now do the relevant commitment_signed/RAA dances along the path, noting that the final
4259                 // hop should *not* yet generate any PaymentClaimable event(s).
4260                 pass_along_path(&nodes[0], &[&nodes[1]], 100000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
4261                 our_payment_hash
4262         } else {
4263                 route_payment(&nodes[0], &[&nodes[1]], 100000).1
4264         };
4265
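	// Connect blocks until the HTLC is within CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS of
	// its expiry; at that point the receiver fails the still-unclaimed HTLC back rather than risk
	// its counterparty force-closing.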
4266         let mut block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
4267         connect_block(&nodes[0], &block);
4268         connect_block(&nodes[1], &block);
4269         let block_count = TEST_FINAL_CLTV + CHAN_CONFIRM_DEPTH + 2 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS;
4270         for _ in CHAN_CONFIRM_DEPTH + 2..block_count {
4271                 block.header.prev_blockhash = block.block_hash();
4272                 connect_block(&nodes[0], &block);
4273                 connect_block(&nodes[1], &block);
4274         }
4275
4276         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
4277
4278         check_added_monitors!(nodes[1], 1);
4279         let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4280         assert!(htlc_timeout_updates.update_add_htlcs.is_empty());
4281         assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1);
4282         assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty());
4283         assert!(htlc_timeout_updates.update_fee.is_none());
4284
4285         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]);
4286         commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false);
4287         // 100_000 msat as u64, followed by the height at which we failed back above
4288         let mut expected_failure_data = (100_000 as u64).to_be_bytes().to_vec();
4289         expected_failure_data.extend_from_slice(&(block_count - 1).to_be_bytes());
4290         expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]);
4291 }
4292
4293 #[test]
4294 fn test_htlc_timeout() {
4295         do_test_htlc_timeout(true);
4296         do_test_htlc_timeout(false);
4297 }
4298
4299 fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
4300         // Tests that HTLCs in the holding cell are timed out after the requisite number of blocks.
4301         let chanmon_cfgs = create_chanmon_cfgs(3);
4302         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
4303         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
4304         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
4305         create_announced_chan_between_nodes(&nodes, 0, 1);
4306         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4307
4308         // Make sure all nodes are at the same starting height
4309         connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
4310         connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
4311         connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
4312
4313         // Route a first payment to get the 1 -> 2 channel in awaiting_raa...
4314         let (route, first_payment_hash, _, first_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
4315         nodes[1].node.send_payment_with_route(&route, first_payment_hash,
4316                 RecipientOnionFields::secret_only(first_payment_secret), PaymentId(first_payment_hash.0)).unwrap();
4317         assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
4318         check_added_monitors!(nodes[1], 1);
4319
4320         // Now attempt to route a second payment, which should be placed in the holding cell
4321         let sending_node = if forwarded_htlc { &nodes[0] } else { &nodes[1] };
4322         let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(sending_node, nodes[2], 100000);
4323         sending_node.node.send_payment_with_route(&route, second_payment_hash,
4324                 RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap();
4325         if forwarded_htlc {
4326                 check_added_monitors!(nodes[0], 1);
4327                 let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
4328                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
4329                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
4330                 expect_pending_htlcs_forwardable!(nodes[1]);
4331         }
4332         check_added_monitors!(nodes[1], 0);
4333
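	// The second HTLC now sits in nodes[1]'s holding cell. It should stay there until its CLTV
	// expiry gets close enough: the first connect_blocks below should produce no events, while the
	// single extra block afterwards should time the HTLC out.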
4334         connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS);
4335         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4336         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
4337         connect_blocks(&nodes[1], 1);
4338
4339         if forwarded_htlc {
4340                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
4341                 check_added_monitors!(nodes[1], 1);
4342                 let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
4343                 assert_eq!(fail_commit.len(), 1);
4344                 match fail_commit[0] {
4345                         MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, ref commitment_signed, .. }, .. } => {
4346                                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
4347                                 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, true, true);
4348                         },
4349                         _ => unreachable!(),
4350                 }
4351                 expect_payment_failed_with_update!(nodes[0], second_payment_hash, false, chan_2.0.contents.short_channel_id, false);
4352         } else {
4353                 expect_payment_failed!(nodes[1], second_payment_hash, false);
4354         }
4355 }
4356
4357 #[test]
4358 fn test_holding_cell_htlc_add_timeouts() {
4359         do_test_holding_cell_htlc_add_timeouts(false);
4360         do_test_holding_cell_htlc_add_timeouts(true);
4361 }
4362
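// Drains any SpendableOutputs events from the node's ChainMonitor and returns sweep transactions
// spending each output individually (plus, when more than one output exists, one spending them all
// together), each sent to an OP_RETURN script at the minimum feerate of 253 sat per 1000 weight.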
4363 macro_rules! check_spendable_outputs {
4364         ($node: expr, $keysinterface: expr) => {
4365                 {
4366                         let mut events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events();
4367                         let mut txn = Vec::new();
4368                         let mut all_outputs = Vec::new();
4369                         let secp_ctx = Secp256k1::new();
4370                         for event in events.drain(..) {
4371                                 match event {
4372                                         Event::SpendableOutputs { mut outputs, channel_id: _ } => {
4373                                                 for outp in outputs.drain(..) {
4374                                                         txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx).unwrap());
4375                                                         all_outputs.push(outp);
4376                                                 }
4377                                         },
4378                                         _ => panic!("Unexpected event"),
4379                                 };
4380                         }
4381                         if all_outputs.len() > 1 {
4382                                 if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs(&all_outputs.iter().map(|a| a).collect::<Vec<_>>(), Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx) {
4383                                         txn.push(tx);
4384                                 }
4385                         }
4386                         txn
4387                 }
4388         }
4389 }
4390
4391 #[test]
4392 fn test_claim_sizeable_push_msat() {
4393         // Incidentally test SpendableOutputs event generation due to detection of the to_local output on the commitment tx
4394         let chanmon_cfgs = create_chanmon_cfgs(2);
4395         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4396         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4397         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4398
4399         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
4400         nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
4401         check_closed_broadcast!(nodes[1], true);
4402         check_added_monitors!(nodes[1], 1);
4403         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
4404         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4405         assert_eq!(node_txn.len(), 1);
4406         check_spends!(node_txn[0], chan.3);
4407         assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of to_remote output as channel_reserve_satoshis blocks us from doing so at channel opening
4408
4409         mine_transaction(&nodes[1], &node_txn[0]);
4410         connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
4411
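	// After BREAKDOWN_TIMEOUT confirmations (the to_self_delay used here, per the nSequence check
	// below) the to_local output matures and a SpendableOutputs event should be generated.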
4412         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4413         assert_eq!(spend_txn.len(), 1);
4414         assert_eq!(spend_txn[0].input.len(), 1);
4415         check_spends!(spend_txn[0], node_txn[0]);
4416         assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
4417 }
4418
4419 #[test]
4420 fn test_claim_on_remote_sizeable_push_msat() {
4421         // Same test as previous, but on the remote commitment tx, as per_commitment_point registration changes depending on whether you're the funder or fundee, and the
4422         // to_remote output is encumbered by a P2WPKH
4423         let chanmon_cfgs = create_chanmon_cfgs(2);
4424         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4425         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4426         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4427
4428         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
4429         nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
4430         check_closed_broadcast!(nodes[0], true);
4431         check_added_monitors!(nodes[0], 1);
4432         check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
4433
4434         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4435         assert_eq!(node_txn.len(), 1);
4436         check_spends!(node_txn[0], chan.3);
4437         assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of to_remote output as channel_reserve_satoshis blocks us from doing so at channel opening
4438
4439         mine_transaction(&nodes[1], &node_txn[0]);
4440         check_closed_broadcast!(nodes[1], true);
4441         check_added_monitors!(nodes[1], 1);
4442         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4443         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4444
4445         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4446         assert_eq!(spend_txn.len(), 1);
4447         check_spends!(spend_txn[0], node_txn[0]);
4448 }
4449
4450 #[test]
4451 fn test_claim_on_remote_revoked_sizeable_push_msat() {
4452         // Same test as the previous one, but on the remote revoked commitment tx, as per_commitment_point registration changes depending on whether you're the funder or fundee, and the
4453         // to_remote output is encumbered by a P2WPKH
4454
4455         let chanmon_cfgs = create_chanmon_cfgs(2);
4456         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4457         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4458         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4459
4460         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000);
4461         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4462         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2);
4463         assert_eq!(revoked_local_txn[0].input.len(), 1);
4464         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
4465
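        // Claiming the payment revokes A's earlier commitment state; B then sees that now-revoked commitment confirm on-chain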
4466         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4467         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4468         check_closed_broadcast!(nodes[1], true);
4469         check_added_monitors!(nodes[1], 1);
4470         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4471
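        // B's ChannelMonitor responds with a claim (justice) tx against the revoked commitment, which we also confirm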
4472         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4473         mine_transaction(&nodes[1], &node_txn[0]);
4474         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4475
4476         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4477         assert_eq!(spend_txn.len(), 3);
4478         check_spends!(spend_txn[0], revoked_local_txn[0]); // to_remote output on revoked remote commitment_tx
4479         check_spends!(spend_txn[1], node_txn[0]);
4480         check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[0]); // Both outputs
4481 }
4482
4483 #[test]
4484 fn test_static_spendable_outputs_preimage_tx() {
4485         let chanmon_cfgs = create_chanmon_cfgs(2);
4486         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4487         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4488         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4489
4490         // Create some initial channels
4491         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4492
4493         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
4494
4495         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4496         assert_eq!(commitment_tx[0].input.len(), 1);
4497         assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
4498
4499         // Settle A's commitment tx on B's chain
4500         nodes[1].node.claim_funds(payment_preimage);
4501         expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
4502         check_added_monitors!(nodes[1], 1);
4503         mine_transaction(&nodes[1], &commitment_tx[0]);
4504         check_added_monitors!(nodes[1], 1);
4505         let events = nodes[1].node.get_and_clear_pending_msg_events();
4506         match events[0] {
4507                 MessageSendEvent::UpdateHTLCs { .. } => {},
4508                 _ => panic!("Unexpected event"),
4509         }
4510         match events[1] {
4511                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4512                 _ => panic!("Unexpected event"),
4513         }
4514
4515         // Check B's monitor was able to send back output descriptor event for preimage tx on A's commitment tx
4516         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: preimage tx
4517         assert_eq!(node_txn.len(), 1);
4518         check_spends!(node_txn[0], commitment_tx[0]);
4519         assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
4520
4521         mine_transaction(&nodes[1], &node_txn[0]);
4522         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4523         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4524
4525         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4526         assert_eq!(spend_txn.len(), 1);
4527         check_spends!(spend_txn[0], node_txn[0]);
4528 }
4529
4530 #[test]
4531 fn test_static_spendable_outputs_timeout_tx() {
4532         let chanmon_cfgs = create_chanmon_cfgs(2);
4533         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4534         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4535         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4536
4537         // Create some initial channels
4538         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4539
4540         // Rebalance the network a bit by relaying one payment through all the channels ...
4541         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
4542
4543         let (_, our_payment_hash, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000);
4544
4545         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4546         assert_eq!(commitment_tx[0].input.len(), 1);
4547         assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
4548
4549         // Settle A's commitment tx on B's chain
4550         mine_transaction(&nodes[1], &commitment_tx[0]);
4551         check_added_monitors!(nodes[1], 1);
4552         let events = nodes[1].node.get_and_clear_pending_msg_events();
4553         match events[0] {
4554                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4555                 _ => panic!("Unexpected event"),
4556         }
4557         connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
4558
4559         // Check B's monitor was able to send back output descriptor event for timeout tx on A's commitment tx
4560         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4561         assert_eq!(node_txn.len(), 1); // ChannelMonitor: timeout tx
4562         check_spends!(node_txn[0], commitment_tx[0]);
4563         assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4564
4565         mine_transaction(&nodes[1], &node_txn[0]);
4566         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4567         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4568         expect_payment_failed!(nodes[1], our_payment_hash, false);
4569
4570         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4571         assert_eq!(spend_txn.len(), 3); // SpendableOutput: remote_commitment_tx.to_remote, timeout_tx.output, plus one tx aggregating both
4572         check_spends!(spend_txn[0], commitment_tx[0]);
4573         check_spends!(spend_txn[1], node_txn[0]);
4574         check_spends!(spend_txn[2], node_txn[0], commitment_tx[0]); // All outputs
4575 }
4576
4577 #[test]
4578 fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
4579         let chanmon_cfgs = create_chanmon_cfgs(2);
4580         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4581         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4582         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4583
4584         // Create some initial channels
4585         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4586
4587         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4588         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
4589         assert_eq!(revoked_local_txn[0].input.len(), 1);
4590         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4591
4592         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4593
4594         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4595         check_closed_broadcast!(nodes[1], true);
4596         check_added_monitors!(nodes[1], 1);
4597         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4598
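        // B's justice tx claims both revocable outputs of the revoked commitment (to_local and the offered HTLC) in a single transaction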
4599         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4600         assert_eq!(node_txn.len(), 1);
4601         assert_eq!(node_txn[0].input.len(), 2);
4602         check_spends!(node_txn[0], revoked_local_txn[0]);
4603
4604         mine_transaction(&nodes[1], &node_txn[0]);
4605         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4606
4607         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4608         assert_eq!(spend_txn.len(), 1);
4609         check_spends!(spend_txn[0], node_txn[0]);
4610 }
4611
4612 #[test]
4613 fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
4614         let mut chanmon_cfgs = create_chanmon_cfgs(2);
4615         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
4616         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4617         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4618         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4619
4620         // Create some initial channels
4621         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4622
4623         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4624         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
4625         assert_eq!(revoked_local_txn[0].input.len(), 1);
4626         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4627
4628         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4629
4630         // A will generate HTLC-Timeout from revoked commitment tx
4631         mine_transaction(&nodes[0], &revoked_local_txn[0]);
4632         check_closed_broadcast!(nodes[0], true);
4633         check_added_monitors!(nodes[0], 1);
4634         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4635         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
4636
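        // A, which broadcast its own revoked commitment, claims the now-expired HTLC with an HTLC-Timeout tx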
4637         let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4638         assert_eq!(revoked_htlc_txn.len(), 1);
4639         assert_eq!(revoked_htlc_txn[0].input.len(), 1);
4640         assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
4641         check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
4642         assert_ne!(revoked_htlc_txn[0].lock_time.0, 0); // HTLC-Timeout
4643
4644         // B will generate justice tx from A's revoked commitment/HTLC tx
4645         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
4646         check_closed_broadcast!(nodes[1], true);
4647         check_added_monitors!(nodes[1], 1);
4648         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4649
4650         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4651         assert_eq!(node_txn.len(), 2); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs
4652         // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
4653         // including the one already spent by revoked_htlc_txn[0]. That's OK, we'll spend with valid
4654         // transactions next...
4655         assert_eq!(node_txn[0].input.len(), 3);
4656         check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);
4657
4658         assert_eq!(node_txn[1].input.len(), 2);
4659         check_spends!(node_txn[1], revoked_local_txn[0], revoked_htlc_txn[0]);
4660         if node_txn[1].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
4661                 assert_ne!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4662         } else {
4663                 assert_eq!(node_txn[1].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
4664                 assert_ne!(node_txn[1].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4665         }
4666
4667         mine_transaction(&nodes[1], &node_txn[1]);
4668         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4669
4670         // Check B's ChannelMonitor was able to generate the right spendable output descriptor
4671         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4672         assert_eq!(spend_txn.len(), 1);
4673         assert_eq!(spend_txn[0].input.len(), 1);
4674         check_spends!(spend_txn[0], node_txn[1]);
4675 }
4676
4677 #[test]
4678 fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
4679         let mut chanmon_cfgs = create_chanmon_cfgs(2);
4680         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
4681         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4682         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4683         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4684
4685         // Create some initial channels
4686         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4687
4688         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4689         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
4690         assert_eq!(revoked_local_txn[0].input.len(), 1);
4691         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4692
4693         // The to-be-revoked commitment tx should have one HTLC and one to_remote output
4694         assert_eq!(revoked_local_txn[0].output.len(), 2);
4695
4696         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4697
4698         // B will generate HTLC-Success from revoked commitment tx
4699         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4700         check_closed_broadcast!(nodes[1], true);
4701         check_added_monitors!(nodes[1], 1);
4702         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4703         let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4704
4705         assert_eq!(revoked_htlc_txn.len(), 1);
4706         assert_eq!(revoked_htlc_txn[0].input.len(), 1);
4707         assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4708         check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
4709
4710         // Check that the remaining unspent output (of two) on revoked_local_txn[0] is a P2WPKH:
4711         let unspent_local_txn_output = revoked_htlc_txn[0].input[0].previous_output.vout as usize ^ 1;
4712         assert_eq!(revoked_local_txn[0].output[unspent_local_txn_output].script_pubkey.len(), 2 + 20); // P2WPKH
4713
4714         // A will generate justice tx from B's revoked commitment/HTLC tx
4715         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
4716         check_closed_broadcast!(nodes[0], true);
4717         check_added_monitors!(nodes[0], 1);
4718         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4719
4720         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4721         assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success
4722
4723         // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
4724         // including the one already spent by revoked_htlc_txn[0]. That's OK, we'll spend with valid
4725         // transactions next...
4726         assert_eq!(node_txn[0].input.len(), 2);
4727         check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);
4728         if node_txn[0].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
4729                 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4730         } else {
4731                 assert_eq!(node_txn[0].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
4732                 assert_eq!(node_txn[0].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4733         }
4734
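        // The second justice tx cleanly claims only the output of B's revoked HTLC-Success tx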
4735         assert_eq!(node_txn[1].input.len(), 1);
4736         check_spends!(node_txn[1], revoked_htlc_txn[0]);
4737
4738         mine_transaction(&nodes[0], &node_txn[1]);
4739         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
4740
4741         // Note that nodes[0]'s tx_broadcaster is still locked, so if we get here the channelmonitor
4742         // didn't try to generate any new transactions.
4743
4744         // Check A's ChannelMonitor was able to generate the right spendable output descriptor
4745         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
4746         assert_eq!(spend_txn.len(), 3);
4747         assert_eq!(spend_txn[0].input.len(), 1);
4748         check_spends!(spend_txn[0], revoked_local_txn[0]); // spending to_remote output from revoked local tx
4749         assert_ne!(spend_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4750         check_spends!(spend_txn[1], node_txn[1]); // spending justice tx output on the htlc success tx
4751         check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[1]); // Both outputs
4752 }
4753
4754 #[test]
4755 fn test_onchain_to_onchain_claim() {
4756         // Test that in case of channel closure, we detect the state of the output and claim the HTLC
4757         // on the downstream peer's remote commitment tx.
4758         // First, have C claim an HTLC against its own latest commitment transaction.
4759         // Then, broadcast these to B, which should update the monitor downstream on the A<->B
4760         // channel.
4761         // Finally, check that B will claim the HTLC output if A's latest commitment transaction
4762         // gets broadcast.
4763
4764         let chanmon_cfgs = create_chanmon_cfgs(3);
4765         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
4766         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
4767         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
4768
4769         // Create some initial channels
4770         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4771         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4772
4773         // Ensure all nodes are at the same height
4774         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
4775         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
4776         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
4777         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
4778
4779         // Rebalance the network a bit by relaying one payment through all the channels ...
4780         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
4781         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
4782
4783         let (payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
4784         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
4785         check_spends!(commitment_tx[0], chan_2.3);
4786         nodes[2].node.claim_funds(payment_preimage);
4787         expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
4788         check_added_monitors!(nodes[2], 1);
4789         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
4790         assert!(updates.update_add_htlcs.is_empty());
4791         assert!(updates.update_fail_htlcs.is_empty());
4792         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
4793         assert!(updates.update_fail_malformed_htlcs.is_empty());
4794
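        // Confirm C's commitment on its own chain; C's ChannelMonitor then claims the HTLC with an HTLC-Success tx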
4795         mine_transaction(&nodes[2], &commitment_tx[0]);
4796         check_closed_broadcast!(nodes[2], true);
4797         check_added_monitors!(nodes[2], 1);
4798         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4799
4800         let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx)
4801         assert_eq!(c_txn.len(), 1);
4802         check_spends!(c_txn[0], commitment_tx[0]);
4803         assert_eq!(c_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4804         assert!(c_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
4805         assert_eq!(c_txn[0].lock_time.0, 0); // Success tx
4806
4807         // Now that we've broadcast C's commitment tx and HTLC-Success on B's chain, we should successfully be able to extract the preimage and update the downstream monitor
4808         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), c_txn[0].clone()]));
4809         check_added_monitors!(nodes[1], 1);
4810         let events = nodes[1].node.get_and_clear_pending_events();
4811         assert_eq!(events.len(), 2);
4812         match events[0] {
4813                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
4814                 _ => panic!("Unexpected event"),
4815         }
4816         match events[1] {
4817                 Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
4818                         assert_eq!(fee_earned_msat, Some(1000));
4819                         assert_eq!(prev_channel_id, Some(chan_1.2));
4820                         assert_eq!(claim_from_onchain_tx, true);
4821                         assert_eq!(next_channel_id, Some(chan_2.2));
4822                         assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
4823                 },
4824                 _ => panic!("Unexpected event"),
4825         }
4826         check_added_monitors!(nodes[1], 1);
4827         let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events();
4828         assert_eq!(msg_events.len(), 3);
4829         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut msg_events);
4830         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut msg_events);
4831
4832         match nodes_2_event {
4833                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
4834                 _ => panic!("Unexpected event"),
4835         }
4836
4837         match nodes_0_event {
4838                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
4839                         assert!(update_add_htlcs.is_empty());
4840                         assert!(update_fail_htlcs.is_empty());
4841                         assert_eq!(update_fulfill_htlcs.len(), 1);
4842                         assert!(update_fail_malformed_htlcs.is_empty());
4843                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
4844                 },
4845                 _ => panic!("Unexpected event"),
4846         };
4847
4848         // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
4849         match msg_events[0] {
4850                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4851                 _ => panic!("Unexpected event"),
4852         }
4853
4854         // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
4855         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4856         mine_transaction(&nodes[1], &commitment_tx[0]);
4857         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4858         let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4859         // ChannelMonitor: HTLC-Success tx
4860         assert_eq!(b_txn.len(), 1);
4861         check_spends!(b_txn[0], commitment_tx[0]);
4862         assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
4863         assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
4864         assert_eq!(b_txn[0].lock_time.0, nodes[1].best_block_info().1); // Success tx
4865
4866         check_closed_broadcast!(nodes[1], true);
4867         check_added_monitors!(nodes[1], 1);
4868 }
4869
4870 #[test]
4871 fn test_duplicate_payment_hash_one_failure_one_success() {
4872         // Topology : A --> B --> C --> D
4873         // We route 2 payments with the same hash between B and C; one will time out and the other will be successfully claimed.
4874         // Note that because C will refuse to generate two payment secrets for the same payment hash,
4875         // we forward one of the payments onwards to D.
4876         let chanmon_cfgs = create_chanmon_cfgs(4);
4877         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
4878         // When this test was written, the default base fee floated based on the HTLC count.
4879         // It is now fixed, so we simply set the fee to the expected value here.
4880         let mut config = test_default_channel_config();
4881         config.channel_config.forwarding_fee_base_msat = 196;
4882         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs,
4883                 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
4884         let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
4885
4886         create_announced_chan_between_nodes(&nodes, 0, 1);
4887         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4888         create_announced_chan_between_nodes(&nodes, 2, 3);
4889
4890         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
4891         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
4892         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
4893         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
4894         connect_blocks(&nodes[3], node_max_height - nodes[3].best_block_info().1);
4895
4896         let (our_payment_preimage, duplicate_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000);
4897
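        // Register a second inbound payment at D reusing the same payment hash, so the B<->C channel carries two HTLCs with identical hashes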
4898         let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, None).unwrap();
4899         // We reduce the final CLTV here by a somewhat arbitrary constant to keep it under the one-byte
4900         // script push size limit so that the below script length checks match
4901         // ACCEPTED_HTLC_SCRIPT_WEIGHT.
4902         let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV - 40)
4903                 .with_bolt11_features(nodes[3].node.invoice_features()).unwrap();
4904         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, 800_000);
4905         send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[2], &nodes[3]]], 800_000, duplicate_payment_hash, payment_secret);
4906
4907         let commitment_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
4908         assert_eq!(commitment_txn[0].input.len(), 1);
4909         check_spends!(commitment_txn[0], chan_2.3);
4910
4911         mine_transaction(&nodes[1], &commitment_txn[0]);
4912         check_closed_broadcast!(nodes[1], true);
4913         check_added_monitors!(nodes[1], 1);
4914         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
4915         connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
4916
4917         let htlc_timeout_tx;
4918         { // Extract one of the two HTLC-Timeout transactions
4919                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
4920                 // ChannelMonitor: timeout tx * 2-or-3
4921                 assert!(node_txn.len() == 2 || node_txn.len() == 3);
4922
4923                 check_spends!(node_txn[0], commitment_txn[0]);
4924                 assert_eq!(node_txn[0].input.len(), 1);
4925                 assert_eq!(node_txn[0].output.len(), 1);
4926
4927                 if node_txn.len() > 2 {
4928                         check_spends!(node_txn[1], commitment_txn[0]);
4929                         assert_eq!(node_txn[1].input.len(), 1);
4930                         assert_eq!(node_txn[1].output.len(), 1);
4931                         assert_eq!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
4932
4933                         check_spends!(node_txn[2], commitment_txn[0]);
4934                         assert_eq!(node_txn[2].input.len(), 1);
4935                         assert_eq!(node_txn[2].output.len(), 1);
4936                         assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
4937                 } else {
4938                         check_spends!(node_txn[1], commitment_txn[0]);
4939                         assert_eq!(node_txn[1].input.len(), 1);
4940                         assert_eq!(node_txn[1].output.len(), 1);
4941                         assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
4942                 }
4943
4944                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4945                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4946                 // Assign htlc_timeout_tx to the forwarded HTLC (with value ~800 sats). The received HTLC
4947                 // (with value 900 sats) will be claimed in the below `claim_funds` call.
4948                 if node_txn.len() > 2 {
4949                         assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4950                         htlc_timeout_tx = if node_txn[2].output[0].value < 900 { node_txn[2].clone() } else { node_txn[0].clone() };
4951                 } else {
4952                         htlc_timeout_tx = if node_txn[0].output[0].value < 900 { node_txn[1].clone() } else { node_txn[0].clone() };
4953                 }
4954         }
4955
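        // C claims the HTLC it received directly (900k msat) with the preimage, then sees the B<->C commitment confirm on its own chain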
4956         nodes[2].node.claim_funds(our_payment_preimage);
4957         expect_payment_claimed!(nodes[2], duplicate_payment_hash, 900_000);
4958
4959         mine_transaction(&nodes[2], &commitment_txn[0]);
4960         check_added_monitors!(nodes[2], 2);
4961         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4962         let events = nodes[2].node.get_and_clear_pending_msg_events();
4963         match events[0] {
4964                 MessageSendEvent::UpdateHTLCs { .. } => {},
4965                 _ => panic!("Unexpected event"),
4966         }
4967         match events[1] {
4968                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4969                 _ => panic!("Unexpected event"),
4970         }
4971         let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4972         assert_eq!(htlc_success_txn.len(), 2); // ChannelMonitor: HTLC-Success txn (*2 due to 2-HTLC outputs)
4973         check_spends!(htlc_success_txn[0], commitment_txn[0]);
4974         check_spends!(htlc_success_txn[1], commitment_txn[0]);
4975         assert_eq!(htlc_success_txn[0].input.len(), 1);
4976         assert_eq!(htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4977         assert_eq!(htlc_success_txn[1].input.len(), 1);
4978         assert_eq!(htlc_success_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4979         assert_ne!(htlc_success_txn[0].input[0].previous_output, htlc_success_txn[1].input[0].previous_output);
4980         assert_ne!(htlc_success_txn[1].input[0].previous_output, htlc_timeout_tx.input[0].previous_output);
4981
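        // Confirming the HTLC-Timeout at B (plus ANTI_REORG_DELAY) fails the forwarded HTLC back to A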
4982         mine_transaction(&nodes[1], &htlc_timeout_tx);
4983         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4984         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
4985         let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4986         assert!(htlc_updates.update_add_htlcs.is_empty());
4987         assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
4988         let first_htlc_id = htlc_updates.update_fail_htlcs[0].htlc_id;
4989         assert!(htlc_updates.update_fulfill_htlcs.is_empty());
4990         assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
4991         check_added_monitors!(nodes[1], 1);
4992
4993         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
4994         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4995         {
4996                 commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true);
4997         }
4998         expect_payment_failed_with_update!(nodes[0], duplicate_payment_hash, false, chan_2.0.contents.short_channel_id, true);
4999
5000         // Settle the 2nd HTLC by broadcasting C's HTLC-Success tx on B's chain
5001         mine_transaction(&nodes[1], &htlc_success_txn[1]);
5002         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(196), true, true);
5003         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5004         assert!(updates.update_add_htlcs.is_empty());
5005         assert!(updates.update_fail_htlcs.is_empty());
5006         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
5007         assert_ne!(updates.update_fulfill_htlcs[0].htlc_id, first_htlc_id);
5008         assert!(updates.update_fail_malformed_htlcs.is_empty());
5009         check_added_monitors!(nodes[1], 1);
5010
5011         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
5012         commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false);
5013         expect_payment_sent(&nodes[0], our_payment_preimage, None, true, true);
5014 }
5015
5016 #[test]
5017 fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
5018         let chanmon_cfgs = create_chanmon_cfgs(2);
5019         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5020         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5021         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5022
5023         // Create some initial channels
5024         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5025
5026         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
5027         let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
5028         assert_eq!(local_txn.len(), 1);
5029         assert_eq!(local_txn[0].input.len(), 1);
5030         check_spends!(local_txn[0], chan_1.3);
5031
5032         // Give B knowledge of preimage to be able to generate a local HTLC-Success Tx
5033         nodes[1].node.claim_funds(payment_preimage);
5034         expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
5035         check_added_monitors!(nodes[1], 1);
5036
5037         mine_transaction(&nodes[1], &local_txn[0]);
5038         check_added_monitors!(nodes[1], 1);
5039         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
5040         let events = nodes[1].node.get_and_clear_pending_msg_events();
5041         match events[0] {
5042                 MessageSendEvent::UpdateHTLCs { .. } => {},
5043                 _ => panic!("Unexpected event"),
5044         }
5045         match events[1] {
5046                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5047                 _ => panic!("Unexpected event"),
5048         }
5049         let node_tx = {
5050                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5051                 assert_eq!(node_txn.len(), 1);
5052                 assert_eq!(node_txn[0].input.len(), 1);
5053                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5054                 check_spends!(node_txn[0], local_txn[0]);
5055                 node_txn[0].clone()
5056         };
5057
5058         mine_transaction(&nodes[1], &node_tx);
5059         connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
5060
5061         // Verify that B is able to spend its own HTLC-Success tx thanks to the spendable output event given back by its ChannelMonitor
5062         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5063         assert_eq!(spend_txn.len(), 1);
5064         assert_eq!(spend_txn[0].input.len(), 1);
5065         check_spends!(spend_txn[0], node_tx);
5066         assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5067 }
5068
5069 fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, announce_latest: bool) {
5070         // Test that we fail backwards the full set of HTLCs we need to when remote broadcasts an
5071         // unrevoked commitment transaction.
5072         // This includes HTLCs which were below the dust threshold as well as HTLCs which were awaiting
5073         // a remote RAA before they could be failed backwards (and combinations thereof).
5074         // We also test duplicate-hash HTLCs by adding two nodes on each side of the target nodes which
5075         // use the same payment hashes.
5076         // Thus, we use a six-node network:
5077         //
5078         // A \         / E
5079         //    - C - D -
5080         // B /         \ F
5081         // And test where C fails back to A/B when D announces its latest commitment transaction
5082         let chanmon_cfgs = create_chanmon_cfgs(6);
5083         let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
5084         // When this test was written, the default base fee floated based on the HTLC count.
5085         // It is now fixed, so we simply set the fee to the expected value here.
5086         let mut config = test_default_channel_config();
5087         config.channel_config.forwarding_fee_base_msat = 196;
5088         let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs,
5089                 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
5090         let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
5091
5092         let _chan_0_2 = create_announced_chan_between_nodes(&nodes, 0, 2);
5093         let _chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
5094         let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
5095         let chan_3_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
5096         let chan_3_5 = create_announced_chan_between_nodes(&nodes, 3, 5);
5097
5098         // Rebalance and check output sanity...
5099         send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
5100         send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000);
5101         assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
5102
5103         let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
5104                 .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context.holder_dust_limit_satoshis;
5105         // 0th HTLC:
5106         let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5107         // 1st HTLC:
5108         let (_, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5109         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
5110         // 2nd HTLC:
5111         send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5112         // 3rd HTLC:
5113         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5114         // 4th HTLC:
5115         let (_, payment_hash_3, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5116         // 5th HTLC:
5117         let (_, payment_hash_4, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5118         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
5119         // 6th HTLC:
5120         send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200, None).unwrap());
5121         // 7th HTLC:
5122         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200, None).unwrap());
5123
5124         // 8th HTLC:
5125         let (_, payment_hash_5, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5126         // 9th HTLC:
5127         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
5128         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5129
5130         // 10th HTLC:
5131         let (_, payment_hash_6, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5132         // 11th HTLC:
5133         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
5134         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200, None).unwrap());
5135
5136         // Double-check that six of the new HTLCs were added
5137         // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie,
5138         // with the to_local and to_remote outputs, 8 outputs total, with the 6 below-dust HTLCs not included).
5139         assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2).len(), 1);
5140         assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 8);
5141
5142         // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
5143         // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
5144         nodes[4].node.fail_htlc_backwards(&payment_hash_1);
5145         nodes[4].node.fail_htlc_backwards(&payment_hash_3);
5146         nodes[4].node.fail_htlc_backwards(&payment_hash_5);
5147         nodes[4].node.fail_htlc_backwards(&payment_hash_6);
5148         check_added_monitors!(nodes[4], 0);
5149
5150         let failed_destinations = vec![
5151                 HTLCDestination::FailedPayment { payment_hash: payment_hash_1 },
5152                 HTLCDestination::FailedPayment { payment_hash: payment_hash_3 },
5153                 HTLCDestination::FailedPayment { payment_hash: payment_hash_5 },
5154                 HTLCDestination::FailedPayment { payment_hash: payment_hash_6 },
5155         ];
5156         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations);
5157         check_added_monitors!(nodes[4], 1);
5158
5159         let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
5160         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[0]);
5161         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[1]);
5162         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[2]);
5163         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[3]);
5164         commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false);
5165
5166         // Fail 3rd below-dust and 7th above-dust HTLCs
5167         nodes[5].node.fail_htlc_backwards(&payment_hash_2);
5168         nodes[5].node.fail_htlc_backwards(&payment_hash_4);
5169         check_added_monitors!(nodes[5], 0);
5170
5171         let failed_destinations_2 = vec![
5172                 HTLCDestination::FailedPayment { payment_hash: payment_hash_2 },
5173                 HTLCDestination::FailedPayment { payment_hash: payment_hash_4 },
5174         ];
5175         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2);
5176         check_added_monitors!(nodes[5], 1);
5177
5178         let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
5179         nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[0]);
5180         nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]);
5181         commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
5182
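        // Snapshot D's current commitment tx before it fails the six removed HTLCs back to C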
5183         let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
5184
5185         // After the 4 and 2 removes above in nodes[4] and nodes[5] respectively, nodes[3] should receive 6 HTLCHandlingFailed events
5186         let failed_destinations_3 = vec![
5187                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5188                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5189                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5190                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5191                 HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
5192                 HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
5193         ];
5194         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3);
5195         check_added_monitors!(nodes[3], 1);
5196         let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
5197         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
5198         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[1]);
5199         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[2]);
5200         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[3]);
5201         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[4]);
5202         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[5]);
5203         if deliver_last_raa {
5204                 commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false);
5205         } else {
5206                 let _cs_last_raa = commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false, true, false, true);
5207         }
5208
5209         // D's latest commitment transaction now contains 1st + 2nd + 9th HTLCs (implicitly, they're
5210         // below the dust limit) and the 5th + 6th + 11th HTLCs. It has failed back the 0th, 3rd, 4th,
5211         // 7th, 8th, and 10th, but as we haven't yet delivered the final RAA to C, the fails haven't
5212         // propagated back to A/B yet (and D has two unrevoked commitment transactions).
5213         //
5214         // We now broadcast the latest commitment transaction, which *should* result in failures for
5215         // the 0th, 1st, 2nd, 3rd, 4th, 7th, 8th, 9th, and 10th HTLCs, ie all the below-dust HTLCs and
5216         // the non-broadcast above-dust HTLCs.
5217         //
5218         // Alternatively, we may broadcast the previous commitment transaction, which should only
5219         // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
5220         let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
5221
5222         if announce_latest {
5223                 mine_transaction(&nodes[2], &ds_last_commitment_tx[0]);
5224         } else {
5225                 mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
5226         }
5227         let events = nodes[2].node.get_and_clear_pending_events();
5228         let close_event = if deliver_last_raa {
5229                 assert_eq!(events.len(), 2 + 6);
5230                 events.last().clone().unwrap()
5231         } else {
5232                 assert_eq!(events.len(), 1);
5233                 events.last().clone().unwrap()
5234         };
5235         match close_event {
5236                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
5237                 _ => panic!("Unexpected event"),
5238         }
5239
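        // Once the confirmed commitment reaches ANTI_REORG_DELAY, C processes the fail-backs for the HTLCs it was forwarding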
5240         connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
5241         check_closed_broadcast!(nodes[2], true);
5242         if deliver_last_raa {
5243                 expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
5244
5245                 let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
5246                 expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
5247         } else {
5248                 let expected_destinations: Vec<HTLCDestination> = if announce_latest {
5249                         repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect()
5250                 } else {
5251                         repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect()
5252                 };
5253
5254                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations);
5255         }
5256         check_added_monitors!(nodes[2], 3);
5257
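        // C fails the HTLCs backwards in two UpdateHTLCs batches: one to A and one to B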
5258         let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
5259         assert_eq!(cs_msgs.len(), 2);
5260         let mut a_done = false;
5261         for msg in cs_msgs {
5262                 match msg {
5263                         MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
5264                                 // Both under-dust HTLCs and the one above-dust HTLC that we had already failed
5265                                 // should be failed-backwards here.
5266                                 let target = if *node_id == nodes[0].node.get_our_node_id() {
5267                                         // If announce_latest, expect 0th, 1st, 4th, 8th, 10th HTLCs, else only 0th, 1st, 10th below-dust HTLCs
5268                                         for htlc in &updates.update_fail_htlcs {
5269                                                 assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 6 || if announce_latest { htlc.htlc_id == 3 || htlc.htlc_id == 5 } else { false });
5270                                         }
5271                                         assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 5 } else { 3 });
5272                                         assert!(!a_done);
5273                                         a_done = true;
5274                                         &nodes[0]
5275                                 } else {
5276                                         // If announce_latest, expect 2nd, 3rd, 7th, 9th HTLCs, else only 2nd, 3rd, 9th below-dust HTLCs
5277                                         for htlc in &updates.update_fail_htlcs {
5278                                                 assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 5 || if announce_latest { htlc.htlc_id == 4 } else { false });
5279                                         }
5280                                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
5281                                         assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 4 } else { 3 });
5282                                         &nodes[1]
5283                                 };
5284                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
5285                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[1]);
5286                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[2]);
5287                                 if announce_latest {
5288                                         target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[3]);
5289                                         if *node_id == nodes[0].node.get_our_node_id() {
5290                                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[4]);
5291                                         }
5292                                 }
5293                                 commitment_signed_dance!(target, nodes[2], updates.commitment_signed, false, true);
5294                         },
5295                         _ => panic!("Unexpected event"),
5296                 }
5297         }
5298
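        // Verify that A saw PaymentPathFailed/PaymentFailed events for exactly the expected payment hashes, counting NetworkUpdates as we go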
5299         let as_events = nodes[0].node.get_and_clear_pending_events();
5300         assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 });
5301         let mut as_failds = HashSet::new();
5302         let mut as_updates = 0;
5303         for event in as_events.iter() {
5304                 if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
5305                         assert!(as_failds.insert(*payment_hash));
5306                         if *payment_hash != payment_hash_2 {
5307                                 assert_eq!(*payment_failed_permanently, deliver_last_raa);
5308                         } else {
5309                                 assert!(!payment_failed_permanently);
5310                         }
5311                         if let PathFailure::OnPath { network_update: Some(_) } = failure {
5312                                 as_updates += 1;
5313                         }
5314                 } else if let &Event::PaymentFailed { .. } = event {
5315                 } else { panic!("Unexpected event"); }
5316         }
5317         assert!(as_failds.contains(&payment_hash_1));
5318         assert!(as_failds.contains(&payment_hash_2));
5319         if announce_latest {
5320                 assert!(as_failds.contains(&payment_hash_3));
5321                 assert!(as_failds.contains(&payment_hash_5));
5322         }
5323         assert!(as_failds.contains(&payment_hash_6));
5324
5325         let bs_events = nodes[1].node.get_and_clear_pending_events();
5326         assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 });
5327         let mut bs_failds = HashSet::new();
5328         let mut bs_updates = 0;
5329         for event in bs_events.iter() {
5330                 if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
5331                         assert!(bs_failds.insert(*payment_hash));
5332                         if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
5333                                 assert_eq!(*payment_failed_permanently, deliver_last_raa);
5334                         } else {
5335                                 assert!(!payment_failed_permanently);
5336                         }
5337                         if let PathFailure::OnPath { network_update: Some(_) } = failure {
5338                                 bs_updates += 1;
5339                         }
5340                 } else if let &Event::PaymentFailed { .. } = event {
5341                 } else { panic!("Unexpected event"); }
5342         }
5343         assert!(bs_failds.contains(&payment_hash_1));
5344         assert!(bs_failds.contains(&payment_hash_2));
5345         if announce_latest {
5346                 assert!(bs_failds.contains(&payment_hash_4));
5347         }
5348         assert!(bs_failds.contains(&payment_hash_5));
5349
5350         // For each HTLC which was not failed back via the normal process (i.e. when deliver_last_raa is
5351         // set), we should get a NetworkUpdate. A should have gotten 4 HTLCs which were failed back due
5352         // to unknown-preimage-etc, B should have gotten 2. Thus, in the
5353         // announce_latest && deliver_last_raa case, we should have 5-4=1 and 4-2=2 NetworkUpdates.
5354         assert_eq!(as_updates, if deliver_last_raa { 1 } else if !announce_latest { 3 } else { 5 });
5355         assert_eq!(bs_updates, if deliver_last_raa { 2 } else if !announce_latest { 3 } else { 4 });
5356 }
5357
5358 #[test]
5359 fn test_fail_backwards_latest_remote_announce_a() {
5360         do_test_fail_backwards_unrevoked_remote_announce(false, true);
5361 }
5362
5363 #[test]
5364 fn test_fail_backwards_latest_remote_announce_b() {
5365         do_test_fail_backwards_unrevoked_remote_announce(true, true);
5366 }
5367
5368 #[test]
5369 fn test_fail_backwards_previous_remote_announce() {
5370         do_test_fail_backwards_unrevoked_remote_announce(false, false);
5371         // Note that true, true doesn't make sense as it implies we announce a revoked state, which is
5372         // tested for in test_commitment_revoked_fail_backward_exhaustive()
5373 }
5374
5375 #[test]
5376 fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
5377         let chanmon_cfgs = create_chanmon_cfgs(2);
5378         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5379         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5380         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5381
5382         // Create some initial channels
5383         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5384
5385         let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9000000);
5386         let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
5387         assert_eq!(local_txn[0].input.len(), 1);
5388         check_spends!(local_txn[0], chan_1.3);
5389
5390         // Time out the HTLC on A's chain so that it can generate an HTLC-Timeout tx
5391         mine_transaction(&nodes[0], &local_txn[0]);
5392         check_closed_broadcast!(nodes[0], true);
5393         check_added_monitors!(nodes[0], 1);
5394         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5395         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
5396
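        // Fish out the HTLC-Timeout transaction which A's ChannelMonitor broadcast once the HTLC
        // expired; it must spend A's commitment tx and carry an offered-HTLC witness script.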
5397         let htlc_timeout = {
5398                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5399                 assert_eq!(node_txn.len(), 1);
5400                 assert_eq!(node_txn[0].input.len(), 1);
5401                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5402                 check_spends!(node_txn[0], local_txn[0]);
5403                 node_txn[0].clone()
5404         };
5405
5406         mine_transaction(&nodes[0], &htlc_timeout);
5407         connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
5408         expect_payment_failed!(nodes[0], our_payment_hash, false);
5409
5410         // Verify that A is able to spend its own HTLC-Timeout tx thanks to the spendable output event given back by its ChannelMonitor
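        // Three sweeps are expected: one spending the commitment tx directly, one spending the
        // HTLC-Timeout tx (valid only after the BREAKDOWN_TIMEOUT to_self_delay, hence the sequence
        // checks), and one aggregating both.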
5411         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5412         assert_eq!(spend_txn.len(), 3);
5413         check_spends!(spend_txn[0], local_txn[0]);
5414         assert_eq!(spend_txn[1].input.len(), 1);
5415         check_spends!(spend_txn[1], htlc_timeout);
5416         assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5417         assert_eq!(spend_txn[2].input.len(), 2);
5418         check_spends!(spend_txn[2], local_txn[0], htlc_timeout);
5419         assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
5420                 spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
5421 }
5422
5423 #[test]
5424 fn test_key_derivation_params() {
5425         // This test is a copy of test_dynamic_spendable_outputs_local_htlc_timeout_tx, with a key
5426         // manager rotation to test that `channel_keys_id` returned in
5427         // [`SpendableOutputDescriptor::DelayedPaymentOutput`] lets us re-derive the channel key set and
5428         // then derive a `delayed_payment_key` from it.
5429
5430         let chanmon_cfgs = create_chanmon_cfgs(3);
5431
5432         // We manually create the node configuration so that we can back up the seed.
5433         let seed = [42; 32];
5434         let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
5435         let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
5436         let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger));
5437         let scorer = Mutex::new(test_utils::TestScorer::new());
5438         let router = test_utils::TestRouter::new(network_graph.clone(), &scorer);
5439         let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
5440         let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5441         node_cfgs.remove(0);
5442         node_cfgs.insert(0, node);
5443
5444         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5445         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5446
5447         // Create some initial channels
5448         // Create a dummy channel to advance the index by one and thus test re-derivation correctness
5449         // for node 0
5450         let chan_0 = create_announced_chan_between_nodes(&nodes, 0, 2);
5451         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5452         assert_ne!(chan_0.3.output[0].script_pubkey, chan_1.3.output[0].script_pubkey);
5453
5454         // Ensure all nodes are at the same height
5455         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
5456         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
5457         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
5458         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
5459
5460         let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9000000);
5461         let local_txn_0 = get_local_commitment_txn!(nodes[0], chan_0.2);
5462         let local_txn_1 = get_local_commitment_txn!(nodes[0], chan_1.2);
5463         assert_eq!(local_txn_1[0].input.len(), 1);
5464         check_spends!(local_txn_1[0], chan_1.3);
5465
5466         // Check that the funding pubkeys are unique
5467         let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][36..69]));
5468         let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][36..69]));
5469         if from_0_funding_key_0 == from_1_funding_key_0
5470             || from_0_funding_key_0 == from_1_funding_key_1
5471             || from_0_funding_key_1 == from_1_funding_key_0
5472             || from_0_funding_key_1 == from_1_funding_key_1 {
5473                 panic!("Funding pubkeys aren't unique");
5474         }
5475
5476         // Time out the HTLC on A's chain so that it can generate an HTLC-Timeout tx
5477         mine_transaction(&nodes[0], &local_txn_1[0]);
5478         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
5479         check_closed_broadcast!(nodes[0], true);
5480         check_added_monitors!(nodes[0], 1);
5481         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5482
5483         let htlc_timeout = {
5484                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5485                 assert_eq!(node_txn.len(), 1);
5486                 assert_eq!(node_txn[0].input.len(), 1);
5487                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5488                 check_spends!(node_txn[0], local_txn_1[0]);
5489                 node_txn[0].clone()
5490         };
5491
5492         mine_transaction(&nodes[0], &htlc_timeout);
5493         connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
5494         expect_payment_failed!(nodes[0], our_payment_hash, false);
5495
5496         // Verify that A is able to spend its own HTLC-Timeout tx thanks to the spendable output event given back by its ChannelMonitor
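        // A fresh keys manager constructed from the same seed must be able to re-derive, via
        // `channel_keys_id`, the keys needed to sweep these outputs.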
5497         let new_keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
5498         let spend_txn = check_spendable_outputs!(nodes[0], new_keys_manager);
5499         assert_eq!(spend_txn.len(), 3);
5500         check_spends!(spend_txn[0], local_txn_1[0]);
5501         assert_eq!(spend_txn[1].input.len(), 1);
5502         check_spends!(spend_txn[1], htlc_timeout);
5503         assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5504         assert_eq!(spend_txn[2].input.len(), 2);
5505         check_spends!(spend_txn[2], local_txn_1[0], htlc_timeout);
5506         assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
5507                 spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
5508 }
5509
5510 #[test]
5511 fn test_static_output_closing_tx() {
5512         let chanmon_cfgs = create_chanmon_cfgs(2);
5513         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5514         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5515         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5516
5517         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5518
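        // Send a payment, then cooperatively close the channel; once the closing tx reaches
        // ANTI_REORG_DELAY confirmations, each side should see exactly one spendable output covering
        // its share.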
5519         send_payment(&nodes[0], &[&nodes[1]], 8000000);
5520         let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
5521
5522         mine_transaction(&nodes[0], &closing_tx);
5523         check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
5524         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
5525
5526         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5527         assert_eq!(spend_txn.len(), 1);
5528         check_spends!(spend_txn[0], closing_tx);
5529
5530         mine_transaction(&nodes[1], &closing_tx);
5531         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
5532         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5533
5534         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5535         assert_eq!(spend_txn.len(), 1);
5536         check_spends!(spend_txn[0], closing_tx);
5537 }
5538
5539 fn do_htlc_claim_local_commitment_only(use_dust: bool) {
5540         let chanmon_cfgs = create_chanmon_cfgs(2);
5541         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5542         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5543         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5544         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5545
5546         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 });
5547
5548         // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being
5549         // present in B's local commitment transaction, but in none of A's commitment transactions.
5550         nodes[1].node.claim_funds(payment_preimage);
5551         check_added_monitors!(nodes[1], 1);
5552         expect_payment_claimed!(nodes[1], payment_hash, if use_dust { 50000 } else { 3_000_000 });
5553
5554         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5555         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
5556         expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
5557
5558         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
5559         check_added_monitors!(nodes[0], 1);
5560         let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5561         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
5562         check_added_monitors!(nodes[1], 1);
5563
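        // Connect blocks until the HTLC nears expiry. Since the HTLC exists only in B's local
        // commitment, B must go on-chain to claim it, broadcasting an HTLC-Success tx unless the
        // HTLC is dust.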
5564         let starting_block = nodes[1].best_block_info();
5565         let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
5566         for _ in starting_block.1 + 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + starting_block.1 + 2 {
5567                 connect_block(&nodes[1], &block);
5568                 block.header.prev_blockhash = block.block_hash();
5569         }
5570         test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
5571         check_closed_broadcast!(nodes[1], true);
5572         check_added_monitors!(nodes[1], 1);
5573         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
5574 }
5575
5576 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
5577         let chanmon_cfgs = create_chanmon_cfgs(2);
5578         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5579         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5580         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5581         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5582
5583         let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if use_dust { 50000 } else { 3000000 });
5584         nodes[0].node.send_payment_with_route(&route, payment_hash,
5585                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
5586         check_added_monitors!(nodes[0], 1);
5587
5588         let _as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5589
5590         // As far as A is concerned, the HTLC is now present only in the latest remote commitment
5591         // transaction; however, it is not in A's latest local commitment, so we can just broadcast that
5592         // to "time out" the HTLC.
5593
5594         let starting_block = nodes[1].best_block_info();
5595         let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
5596
5597         for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 {
5598                 connect_block(&nodes[0], &block);
5599                 block.header.prev_blockhash = block.block_hash();
5600         }
5601         test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
5602         check_closed_broadcast!(nodes[0], true);
5603         check_added_monitors!(nodes[0], 1);
5604         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5605 }
5606
5607 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
5608         let chanmon_cfgs = create_chanmon_cfgs(3);
5609         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5610         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5611         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5612         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5613
5614         // Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present
5615         // in B's previous (unrevoked) commitment transaction, but in none of A's commitment transactions.
5616         // Also optionally test that we *don't* fail the channel in case the commitment transaction was
5617         // actually revoked.
5618         let htlc_value = if use_dust { 50000 } else { 3000000 };
5619         let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
5620         nodes[1].node.fail_htlc_backwards(&our_payment_hash);
5621         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
5622         check_added_monitors!(nodes[1], 1);
5623
5624         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5625         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
5626         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
5627         check_added_monitors!(nodes[0], 1);
5628         let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5629         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
5630         check_added_monitors!(nodes[1], 1);
5631         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.1);
5632         check_added_monitors!(nodes[1], 1);
5633         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
5634
5635         if check_revoke_no_close {
5636                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
5637                 check_added_monitors!(nodes[0], 1);
5638         }
5639
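        // Connect blocks until well past the HTLC's expiry. If the final RAA was withheld, the HTLC
        // is still present in an unrevoked, broadcastable counterparty commitment, so A must close
        // the channel on-chain; otherwise that commitment has been revoked and A only needs to fail
        // the payment.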
5640         let starting_block = nodes[1].best_block_info();
5641         let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
5642         for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 {
5643                 connect_block(&nodes[0], &block);
5644                 block.header.prev_blockhash = block.block_hash();
5645         }
5646         if !check_revoke_no_close {
5647                 test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
5648                 check_closed_broadcast!(nodes[0], true);
5649                 check_added_monitors!(nodes[0], 1);
5650                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5651         } else {
5652                 expect_payment_failed!(nodes[0], our_payment_hash, true);
5653         }
5654 }
5655
5656 // Test that we close channels on-chain when broadcastable HTLCs reach their timeout window.
5657 // There are only a few cases to test here:
5658 //  * it's not really normative behavior, but we test that below-dust HTLCs "included" in
5659 //    broadcastable commitment transactions result in channel closure,
5660 //  * it's included in an unrevoked-but-previous remote commitment transaction,
5661 //  * it's included in the latest remote or local commitment transactions.
5662 // We test each of the three possible commitment transactions individually and use both dust and
5663 // non-dust HTLCs.
5664 // Note that we don't bother testing both outbound and inbound HTLC failures for each case, and we
5665 // assume they are handled the same across all six cases, as both outbound and inbound failures are
5666 // tested for at least one of the cases in other tests.
5667 #[test]
5668 fn htlc_claim_single_commitment_only_a() {
5669         do_htlc_claim_local_commitment_only(true);
5670         do_htlc_claim_local_commitment_only(false);
5671
5672         do_htlc_claim_current_remote_commitment_only(true);
5673         do_htlc_claim_current_remote_commitment_only(false);
5674 }
5675
5676 #[test]
5677 fn htlc_claim_single_commitment_only_b() {
5678         do_htlc_claim_previous_remote_commitment_only(true, false);
5679         do_htlc_claim_previous_remote_commitment_only(false, false);
5680         do_htlc_claim_previous_remote_commitment_only(true, true);
5681         do_htlc_claim_previous_remote_commitment_only(false, true);
5682 }
5683
5684 #[test]
5685 #[should_panic]
5686 fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be on its own as we are catching a panic
5687         let chanmon_cfgs = create_chanmon_cfgs(2);
5688         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5689         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5690         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5691         // Force duplicate randomness for every get-random call
5692         for node in nodes.iter() {
5693                 *node.keys_manager.override_random_bytes.lock().unwrap() = Some([0; 32]);
5694         }
5695
5696         // BOLT #2 spec: Sending node must ensure temporary_channel_id is unique from any other channel ID with the same peer.
5697         let channel_value_satoshis=10000;
5698         let push_msat=10001;
5699         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).unwrap();
5700         let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5701         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
5702         get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
5703
5704         // Create a second channel with the same random values. This used to panic due to a colliding
5705         // channel_id, but now panics due to a colliding outbound SCID alias.
5706         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err());
5707 }
5708
5709 #[test]
5710 fn bolt2_open_channel_sending_node_checks_part2() {
5711         let chanmon_cfgs = create_chanmon_cfgs(2);
5712         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5713         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5714         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5715
5716         // BOLT #2 spec: Sending node must set funding_satoshis to less than 2^24 satoshis
5717         let channel_value_satoshis=2^24;
5718         let push_msat=10001;
5719         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err());
5720
5721         // BOLT #2 spec: Sending node must set push_msat to equal or less than 1000 * funding_satoshis
5722         let channel_value_satoshis=10000;
5723         // Test when push_msat is one more than 1000 * funding_satoshis.
5724         let push_msat=1000*channel_value_satoshis+1;
5725         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err());
5726
5727         // BOLT #2 spec: Sending node must set channel_reserve_satoshis greater than or equal to dust_limit_satoshis
5728         let channel_value_satoshis=10000;
5729         let push_msat=10001;
5730         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_ok()); //Create a valid channel
5731         let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5732         assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.dust_limit_satoshis);
5733
5734         // BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0
5735         // Only the least-significant bit of channel_flags is currently defined, so channel_flags can only take one of two values: 0 or 1
5736         assert!(node0_to_1_send_open_channel.channel_flags<=1);
5737
5738         // BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver.
5739         assert!(BREAKDOWN_TIMEOUT>0);
5740         assert!(node0_to_1_send_open_channel.to_self_delay==BREAKDOWN_TIMEOUT);
5741
5742         // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within.
5743         let chain_hash=genesis_block(Network::Testnet).header.block_hash();
5744         assert_eq!(node0_to_1_send_open_channel.chain_hash,chain_hash);
5745
5746         // BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys.
5747         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.funding_pubkey.serialize()).is_ok());
5748         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.revocation_basepoint.serialize()).is_ok());
5749         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.htlc_basepoint.serialize()).is_ok());
5750         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.payment_point.serialize()).is_ok());
5751         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.delayed_payment_basepoint.serialize()).is_ok());
5752 }
5753
5754 #[test]
5755 fn bolt2_open_channel_sane_dust_limit() {
5756         let chanmon_cfgs = create_chanmon_cfgs(2);
5757         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5758         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5759         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5760
5761         let channel_value_satoshis=1000000;
5762         let push_msat=10001;
5763         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).unwrap();
5764         let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
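        // Tamper with the received open_channel so its dust_limit_satoshis (547) sits just above our
        // 546-satoshi implementation limit; nodes[1] should respond with an error rather than accept.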
5765         node0_to_1_send_open_channel.dust_limit_satoshis = 547;
5766         node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;
5767
5768         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
5769         let events = nodes[1].node.get_and_clear_pending_msg_events();
5770         let err_msg = match events[0] {
5771                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
5772                         msg.clone()
5773                 },
5774                 _ => panic!("Unexpected event"),
5775         };
5776         assert_eq!(err_msg.data, "dust_limit_satoshis (547) is greater than the implementation limit (546)");
5777 }
5778
5779 // Test that if we fail to send an HTLC that is being freed from the holding cell, and the HTLC
5780 // originated from our node, its failure is surfaced to the user. We trigger this failure-to-free
5781 // by increasing our fee while the HTLC is in the holding cell such that the HTLC is no longer
5782 // affordable once it's freed.
5783 #[test]
5784 fn test_fail_holding_cell_htlc_upon_free() {
5785         let chanmon_cfgs = create_chanmon_cfgs(2);
5786         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5787         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5788         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5789         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
5790
5791         // First nodes[0] generates an update_fee, setting the channel's
5792         // pending_update_fee.
5793         {
5794                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
5795                 *feerate_lock += 20;
5796         }
5797         nodes[0].node.timer_tick_occurred();
5798         check_added_monitors!(nodes[0], 1);
5799
5800         let events = nodes[0].node.get_and_clear_pending_msg_events();
5801         assert_eq!(events.len(), 1);
5802         let (update_msg, commitment_signed) = match events[0] {
5803                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
5804                         (update_fee.as_ref(), commitment_signed)
5805                 },
5806                 _ => panic!("Unexpected event"),
5807         };
5808
5809         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
5810
5811         let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5812         let channel_reserve = chan_stat.channel_reserve_msat;
5813         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
5814         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
5815
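        // The channel was opened with 100_000 sats and 95_000_000 msat pushed to nodes[1], leaving
        // nodes[0] a balance of 5_000_000 msat; max_can_send spends all of it save the reserve and
        // the fee spike buffer.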
5816         // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve.
5817         let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
5818         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
5819
5820         // Send a payment which passes reserve checks but gets stuck in the holding cell.
5821         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
5822                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
5823         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5824         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
5825
5826         // Flush the pending fee update.
5827         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
5828         let (as_revoke_and_ack, _) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5829         check_added_monitors!(nodes[1], 1);
5830         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
5831         check_added_monitors!(nodes[0], 1);
5832
5833         // Upon receipt of the RAA, there will be an attempt to resend the holding cell
5834         // HTLC, but now that the fee has been raised the payment will fail, causing
5835         // us to surface its failure to the user.
5836         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5837         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
5838         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", hex::encode(chan.2)), 1);
5839
5840         // Check that the payment failed to be sent out.
5841         let events = nodes[0].node.get_and_clear_pending_events();
5842         assert_eq!(events.len(), 2);
5843         match &events[0] {
5844                 &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
5845                         assert_eq!(PaymentId(our_payment_hash.0), *payment_id.as_ref().unwrap());
5846                         assert_eq!(our_payment_hash.clone(), *payment_hash);
5847                         assert_eq!(*payment_failed_permanently, false);
5848                         assert_eq!(*short_channel_id, Some(route.paths[0].hops[0].short_channel_id));
5849                 },
5850                 _ => panic!("Unexpected event"),
5851         }
5852         match &events[1] {
5853                 &Event::PaymentFailed { ref payment_hash, .. } => {
5854                         assert_eq!(our_payment_hash.clone(), *payment_hash);
5855                 },
5856                 _ => panic!("Unexpected event"),
5857         }
5858 }
5859
5860 // Test that if multiple HTLCs are released from the holding cell and one is
5861 // valid but the other is no longer valid upon release, the valid HTLC can be
5862 // successfully completed while the other one fails as expected.
5863 #[test]
5864 fn test_free_and_fail_holding_cell_htlcs() {
5865         let chanmon_cfgs = create_chanmon_cfgs(2);
5866         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5867         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5868         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5869         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
5870
5871         // First nodes[0] generates an update_fee, setting the channel's
5872         // pending_update_fee.
5873         {
5874                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
5875                 *feerate_lock += 200;
5876         }
5877         nodes[0].node.timer_tick_occurred();
5878         check_added_monitors!(nodes[0], 1);
5879
5880         let events = nodes[0].node.get_and_clear_pending_msg_events();
5881         assert_eq!(events.len(), 1);
5882         let (update_msg, commitment_signed) = match events[0] {
5883                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
5884                         (update_fee.as_ref(), commitment_signed)
5885                 },
5886                 _ => panic!("Unexpected event"),
5887         };
5888
5889         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
5890
5891         let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5892         let channel_reserve = chan_stat.channel_reserve_msat;
5893         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
5894         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
5895
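        // Starting again from nodes[0]'s 5_000_000 msat balance: amt_1 is small, while amt_2 consumes
        // everything left after the reserve and fee spike buffer, so amt_2 becomes unaffordable once
        // the pending fee increase takes effect.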
5896         // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve.
5897         let amt_1 = 20000;
5898         let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features) - amt_1;
5899         let (route_1, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_1);
5900         let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_2);
5901
5902         // Send 2 payments which pass reserve checks but get stuck in the holding cell.
5903         nodes[0].node.send_payment_with_route(&route_1, payment_hash_1,
5904                 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
5905         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5906         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1);
5907         let payment_id_2 = PaymentId(nodes[0].keys_manager.get_secure_random_bytes());
5908         nodes[0].node.send_payment_with_route(&route_2, payment_hash_2,
5909                 RecipientOnionFields::secret_only(payment_secret_2), payment_id_2).unwrap();
5910         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5911         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2);
5912
5913         // Flush the pending fee update.
5914         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
5915         let (revoke_and_ack, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5916         check_added_monitors!(nodes[1], 1);
5917         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_and_ack);
5918         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
5919         check_added_monitors!(nodes[0], 2);
5920
5921         // Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs,
5922         // but now that the fee has been raised the second payment will fail, causing us
5923         // to surface its failure to the user. The first payment should succeed.
5924         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5925         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
5926         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", hex::encode(chan.2)), 1);
5927
5928         // Check that the second payment failed to be sent out.
5929         let events = nodes[0].node.get_and_clear_pending_events();
5930         assert_eq!(events.len(), 2);
5931         match &events[0] {
5932                 &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
5933                         assert_eq!(payment_id_2, *payment_id.as_ref().unwrap());
5934                         assert_eq!(payment_hash_2.clone(), *payment_hash);
5935                         assert_eq!(*payment_failed_permanently, false);
5936                         assert_eq!(*short_channel_id, Some(route_2.paths[0].hops[0].short_channel_id));
5937                 },
5938                 _ => panic!("Unexpected event"),
5939         }
5940         match &events[1] {
5941                 &Event::PaymentFailed { ref payment_hash, .. } => {
5942                         assert_eq!(payment_hash_2.clone(), *payment_hash);
5943                 },
5944                 _ => panic!("Unexpected event"),
5945         }
5946
5947         // Complete the first payment and the RAA from the fee update.
5948         let (payment_event, send_raa_event) = {
5949                 let mut msgs = nodes[0].node.get_and_clear_pending_msg_events();
5950                 assert_eq!(msgs.len(), 2);
5951                 (SendEvent::from_event(msgs.remove(0)), msgs.remove(0))
5952         };
5953         let raa = match send_raa_event {
5954                 MessageSendEvent::SendRevokeAndACK { msg, .. } => msg,
5955                 _ => panic!("Unexpected event"),
5956         };
5957         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
5958         check_added_monitors!(nodes[1], 1);
5959         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
5960         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
5961         let events = nodes[1].node.get_and_clear_pending_events();
5962         assert_eq!(events.len(), 1);
5963         match events[0] {
5964                 Event::PendingHTLCsForwardable { .. } => {},
5965                 _ => panic!("Unexpected event"),
5966         }
5967         nodes[1].node.process_pending_htlc_forwards();
5968         let events = nodes[1].node.get_and_clear_pending_events();
5969         assert_eq!(events.len(), 1);
5970         match events[0] {
5971                 Event::PaymentClaimable { .. } => {},
5972                 _ => panic!("Unexpected event"),
5973         }
5974         nodes[1].node.claim_funds(payment_preimage_1);
5975         check_added_monitors!(nodes[1], 1);
5976         expect_payment_claimed!(nodes[1], payment_hash_1, amt_1);
5977
5978         let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5979         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]);
5980         commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true);
5981         expect_payment_sent!(nodes[0], payment_preimage_1);
5982 }
5983
5984 // Test that if we fail to forward an HTLC that is being freed from the holding cell, the
5985 // HTLC is failed backwards. We trigger this failure-to-forward by increasing our fee while
5986 // the HTLC is in the holding cell such that the HTLC is no longer affordable once it's
5987 // freed.
5988 #[test]
5989 fn test_fail_holding_cell_htlc_upon_free_multihop() {
5990         let chanmon_cfgs = create_chanmon_cfgs(3);
5991         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5992         // Avoid having to include routing fees in calculations
5993         let mut config = test_default_channel_config();
5994         config.channel_config.forwarding_fee_base_msat = 0;
5995         config.channel_config.forwarding_fee_proportional_millionths = 0;
5996         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
5997         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5998         let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
5999         let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);
6000
6001         // First nodes[1] generates an update_fee, setting the channel's
6002         // pending_update_fee.
6003         {
6004                 let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
6005                 *feerate_lock += 20;
6006         }
6007         nodes[1].node.timer_tick_occurred();
6008         check_added_monitors!(nodes[1], 1);
6009
6010         let events = nodes[1].node.get_and_clear_pending_msg_events();
6011         assert_eq!(events.len(), 1);
6012         let (update_msg, commitment_signed) = match events[0] {
6013                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
6014                         (update_fee.as_ref(), commitment_signed)
6015                 },
6016                 _ => panic!("Unexpected event"),
6017         };
6018
6019         nodes[2].node.handle_update_fee(&nodes[1].node.get_our_node_id(), update_msg.unwrap());
6020
6021         let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan_0_1.2);
6022         let channel_reserve = chan_stat.channel_reserve_msat;
6023         let feerate = get_feerate!(nodes[0], nodes[1], chan_0_1.2);
6024         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_0_1.2);
6025
6026         // Send a payment which passes reserve checks but gets stuck in the holding cell.
6027         let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
6028         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send);
6029         let payment_event = {
6030                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6031                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6032                 check_added_monitors!(nodes[0], 1);
6033
6034                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6035                 assert_eq!(events.len(), 1);
6036
6037                 SendEvent::from_event(events.remove(0))
6038         };
6039         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6040         check_added_monitors!(nodes[1], 0);
6041         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6042         expect_pending_htlcs_forwardable!(nodes[1]);
6043
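        // The forwarded HTLC sits in nodes[1]'s holding cell for the 1 <-> 2 channel because that
        // channel's update_fee has not yet been fully committed.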
6044         chan_stat = get_channel_value_stat!(nodes[1], nodes[2], chan_1_2.2);
6045         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
6046
6047         // Flush the pending fee update.
6048         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
6049         let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
6050         check_added_monitors!(nodes[2], 1);
6051         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &raa);
6052         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &commitment_signed);
6053         check_added_monitors!(nodes[1], 2);
6054
6055         // A final RAA message is generated to finalize the fee update.
6056         let events = nodes[1].node.get_and_clear_pending_msg_events();
6057         assert_eq!(events.len(), 1);
6058
6059         let raa_msg = match &events[0] {
6060                 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => {
6061                         msg.clone()
6062                 },
6063                 _ => panic!("Unexpected event"),
6064         };
6065
6066         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_msg);
6067         check_added_monitors!(nodes[2], 1);
6068         assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
6069
6070         // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
6071         let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
6072         assert_eq!(process_htlc_forwards_event.len(), 2);
6073         match &process_htlc_forwards_event[0] {
6074                 &Event::PendingHTLCsForwardable { .. } => {},
6075                 _ => panic!("Unexpected event"),
6076         }
6077
6078         // In response, we call ChannelManager's process_pending_htlc_forwards
6079         nodes[1].node.process_pending_htlc_forwards();
6080         check_added_monitors!(nodes[1], 1);
6081
6082         // This causes the HTLC to be failed backwards.
6083         let fail_event = nodes[1].node.get_and_clear_pending_msg_events();
6084         assert_eq!(fail_event.len(), 1);
6085         let (fail_msg, commitment_signed) = match &fail_event[0] {
6086                 &MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
6087                         assert_eq!(updates.update_add_htlcs.len(), 0);
6088                         assert_eq!(updates.update_fulfill_htlcs.len(), 0);
6089                         assert_eq!(updates.update_fail_malformed_htlcs.len(), 0);
6090                         assert_eq!(updates.update_fail_htlcs.len(), 1);
6091                         (updates.update_fail_htlcs[0].clone(), updates.commitment_signed.clone())
6092                 },
6093                 _ => panic!("Unexpected event"),
6094         };
6095
6096         // Pass the failure messages back to nodes[0].
6097         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
6098         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
6099
6100         // Complete the HTLC failure+removal process.
6101         let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6102         check_added_monitors!(nodes[0], 1);
6103         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
6104         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
6105         check_added_monitors!(nodes[1], 2);
6106         let final_raa_event = nodes[1].node.get_and_clear_pending_msg_events();
6107         assert_eq!(final_raa_event.len(), 1);
6108         let raa = match &final_raa_event[0] {
6109                 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => msg.clone(),
6110                 _ => panic!("Unexpected event"),
6111         };
6112         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
6113         expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_1_2.0.contents.short_channel_id, false);
6114         check_added_monitors!(nodes[0], 1);
6115 }
6116
6117 // BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message.
6118 // BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve.
6119 // TODO: I don't believe this is explicitly enforced when sending an HTLC, but as the fee aspect of the BOLT specs is in flux we leave this as a TODO.
6120
6121 #[test]
6122 fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
6123         //BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these)
6124         let chanmon_cfgs = create_chanmon_cfgs(2);
6125         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6126         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6127         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6128         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6129
6130         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6131         route.paths[0].hops[0].fee_msat = 100;
6132
6133         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6134                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6135                 ), true, APIError::ChannelUnavailable { .. }, {});
6136         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6137 }
6138
6139 #[test]
6140 fn test_update_add_htlc_bolt2_sender_zero_value_msat() {
6141         //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6142         let chanmon_cfgs = create_chanmon_cfgs(2);
6143         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6144         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6145         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6146         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6147
6148         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6149         route.paths[0].hops[0].fee_msat = 0;
6150         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6151                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)),
6152                 true, APIError::ChannelUnavailable { ref err },
6153                 assert_eq!(err, "Cannot send 0-msat HTLC"));
6154
6155         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6156         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 1);
6157 }
6158
6159 #[test]
6160 fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
6161         //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6162         let chanmon_cfgs = create_chanmon_cfgs(2);
6163         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6164         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6165         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6166         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6167
6168         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6169         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6170                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6171         check_added_monitors!(nodes[0], 1);
6172         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6173         updates.update_add_htlcs[0].amount_msat = 0;
6174
6175         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6176         nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
6177         check_closed_broadcast!(nodes[1], true).unwrap();
6178         check_added_monitors!(nodes[1], 1);
6179         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() },
6180                 [nodes[0].node.get_our_node_id()], 100000);
6181 }
6182
6183 #[test]
6184 fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
6185         //BOLT 2 Requirement: MUST set cltv_expiry less than 500000000.
6186         //It is enforced when constructing a route.
6187         let chanmon_cfgs = create_chanmon_cfgs(2);
6188         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6189         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6190         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6191         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6192
6193         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0)
6194                 .with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
6195         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000);
6196         route.paths[0].hops.last_mut().unwrap().cltv_expiry_delta = 500000001;
6197         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6198                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6199                 ), true, APIError::InvalidRoute { ref err },
6200                 assert_eq!(err, &"Channel CLTV overflowed?"));
6201 }
6202
6203 #[test]
6204 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() {
6205         //BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC.
6206         //BOLT 2 Requirement: for the first HTLC it offers MUST set id to 0.
6207         //BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer.
6208         let chanmon_cfgs = create_chanmon_cfgs(2);
6209         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6210         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6211         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6212         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
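             // Read the counterparty's max_accepted_htlcs directly from the channel state so the loop
             // below can fill the remote commitment exactly to that limit before trying one HTLC too many.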
6213         let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
6214                 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.counterparty_max_accepted_htlcs as u64;
6215
6216         // Fetch a route in advance as we will be unable to do so once we're unable to send.
6217         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6218         for i in 0..max_accepted_htlcs {
6219                 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6220                 let payment_event = {
6221                         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6222                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6223                         check_added_monitors!(nodes[0], 1);
6224
6225                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6226                         assert_eq!(events.len(), 1);
6227                         if let MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. }, } = events[0] {
6228                                 assert_eq!(htlcs[0].htlc_id, i);
6229                         } else {
6230                                 assert!(false);
6231                         }
6232                         SendEvent::from_event(events.remove(0))
6233                 };
6234                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6235                 check_added_monitors!(nodes[1], 0);
6236                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6237
6238                 expect_pending_htlcs_forwardable!(nodes[1]);
6239                 expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000);
6240         }
6241         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6242                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6243                 ), true, APIError::ChannelUnavailable { .. }, {});
6244
6245         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6246 }
6247
6248 #[test]
6249 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
6250         //BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC.
6251         let chanmon_cfgs = create_chanmon_cfgs(2);
6252         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6253         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6254         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6255         let channel_value = 100000;
6256         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0);
6257         let max_in_flight = get_channel_value_stat!(nodes[0], nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat;
6258
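             // A payment of exactly max_in_flight succeeds; below we hand-craft a route for
             // max_in_flight + 1 msat and confirm the sender refuses to add the HTLC.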
6259         send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight);
6260
6261         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_in_flight);
6262         // Manually create a route over our max in flight (which our router normally automatically
6263         // limits us to).
6264         route.paths[0].hops[0].fee_msat = max_in_flight + 1;
6265         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6266                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6267                 ), true, APIError::ChannelUnavailable { .. }, {});
6268         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6269
6270         send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
6271 }
6272
6273 // BOLT 2 Requirements for the Receiver when handling an update_add_htlc message.
6274 #[test]
6275 fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
6276         //BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel.
6277         let chanmon_cfgs = create_chanmon_cfgs(2);
6278         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6279         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6280         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6281         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6282         let htlc_minimum_msat: u64;
6283         {
6284                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
6285                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
6286                 let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
6287                 htlc_minimum_msat = channel.context.get_holder_htlc_minimum_msat();
6288         }
6289
6290         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
6291         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6292                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6293         check_added_monitors!(nodes[0], 1);
6294         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6295         updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1;
6296         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6297         assert!(nodes[1].node.list_channels().is_empty());
6298         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6299         assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6300         check_added_monitors!(nodes[1], 1);
6301         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6302 }
6303
6304 #[test]
6305 fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
6306         //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel
6307         let chanmon_cfgs = create_chanmon_cfgs(2);
6308         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6309         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6310         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6311         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6312
6313         let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6314         let channel_reserve = chan_stat.channel_reserve_msat;
6315         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
6316         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
6317         // The 2* and +1 are for the fee spike reserve.
6318         let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
6319
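             // The channel was funded with 100_000 sat and 95_000_000 msat was pushed to node 1, leaving
             // node 0 with 5_000_000 msat; subtracting its reserve and the commitment fee gives the most it can send.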
6320         let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound;
6321         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
6322         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6323                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6324         check_added_monitors!(nodes[0], 1);
6325         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6326
6327         // Even though channel-initiator senders are required to respect the fee_spike_reserve,
6328         // at this time channel-initiatee receivers are not required to enforce that senders
6329         // respect the fee_spike_reserve.
6330         updates.update_add_htlcs[0].amount_msat = max_can_send + commit_tx_fee_outbound + 1;
6331         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6332
6333         assert!(nodes[1].node.list_channels().is_empty());
6334         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6335         assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
6336         check_added_monitors!(nodes[1], 1);
6337         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6338 }
6339
6340 #[test]
6341 fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
6342         //BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel
6343         //BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash.
6344         let chanmon_cfgs = create_chanmon_cfgs(2);
6345         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6346         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6347         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6348         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6349
6350         let send_amt = 3999999;
6351         let (mut route, our_payment_hash, _, our_payment_secret) =
6352                 get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
6353         route.paths[0].hops[0].fee_msat = send_amt;
6354         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
6355         let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
6356         let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap();
6357         let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
6358                 &route.paths[0], send_amt, RecipientOnionFields::secret_only(our_payment_secret), cur_height, &None).unwrap();
6359         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
6360
6361         let mut msg = msgs::UpdateAddHTLC {
6362                 channel_id: chan.2,
6363                 htlc_id: 0,
6364                 amount_msat: 1000,
6365                 payment_hash: our_payment_hash,
6366                 cltv_expiry: htlc_cltv,
6367                 onion_routing_packet: onion_packet.clone(),
6368                 skimmed_fee_msat: None,
6369         };
6370
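             // Replay 50 otherwise-identical update_add_htlc messages (only htlc_id changes, and they all
             // reuse the same payment_hash, which BOLT 2 requires the receiver to allow). The 51st add
             // below exceeds node 1's max_accepted_htlcs and it force-closes the channel.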
6371         for i in 0..50 {
6372                 msg.htlc_id = i as u64;
6373                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6374         }
6375         msg.htlc_id = 50;
6376         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6377
6378         assert!(nodes[1].node.list_channels().is_empty());
6379         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6380         assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6381         check_added_monitors!(nodes[1], 1);
6382         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6383 }
6384
6385 #[test]
6386 fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
6387         //BOLT2 Requirement: if a sending node adds more than its max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction: SHOULD fail the channel
6388         let chanmon_cfgs = create_chanmon_cfgs(2);
6389         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6390         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6391         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6392         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6393
6394         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6395         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6396                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6397         check_added_monitors!(nodes[0], 1);
6398         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6399         updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat + 1;
6400         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6401
6402         assert!(nodes[1].node.list_channels().is_empty());
6403         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6404         assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
6405         check_added_monitors!(nodes[1], 1);
6406         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 1000000);
6407 }
6408
6409 #[test]
6410 fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
6411         //BOLT2 Requirement: if sending node sets cltv_expiry to greater or equal to 500000000: SHOULD fail the channel.
6412         let chanmon_cfgs = create_chanmon_cfgs(2);
6413         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6414         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6415         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6416
6417         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6418         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6419         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6420                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6421         check_added_monitors!(nodes[0], 1);
6422         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6423         updates.update_add_htlcs[0].cltv_expiry = 500000000;
6424         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6425
6426         assert!(nodes[1].node.list_channels().is_empty());
6427         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6428         assert_eq!(err_msg.data, "Remote provided CLTV expiry in seconds instead of block height");
6429         check_added_monitors!(nodes[1], 1);
6430         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6431 }
6432
6433 #[test]
6434 fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
6435         //BOLT 2 requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection.
6436         // We test this by first testing that repeated HTLCs pass commitment signature checks
6437         // after disconnect and that non-sequential htlc_ids result in a channel failure.
6438         let chanmon_cfgs = create_chanmon_cfgs(2);
6439         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6440         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6441         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6442
6443         create_announced_chan_between_nodes(&nodes, 0, 1);
6444         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6445         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6446                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6447         check_added_monitors!(nodes[0], 1);
6448         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6449         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6450
6451         //Disconnect and Reconnect
6452         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
6453         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
6454         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
6455                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
6456         }, true).unwrap();
6457         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
6458         assert_eq!(reestablish_1.len(), 1);
6459         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
6460                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
6461         }, false).unwrap();
6462         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
6463         assert_eq!(reestablish_2.len(), 1);
6464         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
6465         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
6466         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
6467         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
6468
6469         //Resend HTLC
6470         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6471         assert_eq!(updates.commitment_signed.htlc_signatures.len(), 1);
6472         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed);
6473         check_added_monitors!(nodes[1], 1);
6474         let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6475
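             // Deliver the same update_add_htlc once more, now that the HTLC has been committed; its id no
             // longer matches the next expected htlc_id, so node 1 rejects it and closes the channel.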
6476         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6477
6478         assert!(nodes[1].node.list_channels().is_empty());
6479         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6480         assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
6481         check_added_monitors!(nodes[1], 1);
6482         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6483 }
6484
6485 #[test]
6486 fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
6487         //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions:     MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6488
6489         let chanmon_cfgs = create_chanmon_cfgs(2);
6490         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6491         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6492         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6493         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6494         let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6495         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6496                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6497
6498         check_added_monitors!(nodes[0], 1);
6499         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6500         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6501
6502         let update_msg = msgs::UpdateFulfillHTLC{
6503                 channel_id: chan.2,
6504                 htlc_id: 0,
6505                 payment_preimage: our_payment_preimage,
6506         };
6507
6508         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6509
6510         assert!(nodes[0].node.list_channels().is_empty());
6511         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6512         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6513         check_added_monitors!(nodes[0], 1);
6514         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6515 }
6516
6517 #[test]
6518 fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
6519         //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions:     MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6520
6521         let chanmon_cfgs = create_chanmon_cfgs(2);
6522         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6523         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6524         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6525         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6526
6527         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6528         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6529                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6530         check_added_monitors!(nodes[0], 1);
6531         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6532         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6533
6534         let update_msg = msgs::UpdateFailHTLC{
6535                 channel_id: chan.2,
6536                 htlc_id: 0,
6537                 reason: msgs::OnionErrorPacket { data: Vec::new()},
6538         };
6539
6540         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6541
6542         assert!(nodes[0].node.list_channels().is_empty());
6543         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6544         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6545         check_added_monitors!(nodes[0], 1);
6546         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6547 }
6548
6549 #[test]
6550 fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() {
6551         //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions:     MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6552
6553         let chanmon_cfgs = create_chanmon_cfgs(2);
6554         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6555         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6556         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6557         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6558
6559         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6560         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6561                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6562         check_added_monitors!(nodes[0], 1);
6563         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6564         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6565         let update_msg = msgs::UpdateFailMalformedHTLC{
6566                 channel_id: chan.2,
6567                 htlc_id: 0,
6568                 sha256_of_onion: [1; 32],
6569                 failure_code: 0x8000,
6570         };
6571
6572         nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6573
6574         assert!(nodes[0].node.list_channels().is_empty());
6575         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6576         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6577         check_added_monitors!(nodes[0], 1);
6578         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6579 }
6580
6581 #[test]
6582 fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
6583         //BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel.
6584
6585         let chanmon_cfgs = create_chanmon_cfgs(2);
6586         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6587         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6588         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6589         create_announced_chan_between_nodes(&nodes, 0, 1);
6590
6591         let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
6592
6593         nodes[1].node.claim_funds(our_payment_preimage);
6594         check_added_monitors!(nodes[1], 1);
6595         expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);
6596
6597         let events = nodes[1].node.get_and_clear_pending_msg_events();
6598         assert_eq!(events.len(), 1);
6599         let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
6600                 match events[0] {
6601                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6602                                 assert!(update_add_htlcs.is_empty());
6603                                 assert_eq!(update_fulfill_htlcs.len(), 1);
6604                                 assert!(update_fail_htlcs.is_empty());
6605                                 assert!(update_fail_malformed_htlcs.is_empty());
6606                                 assert!(update_fee.is_none());
6607                                 update_fulfill_htlcs[0].clone()
6608                         },
6609                         _ => panic!("Unexpected event"),
6610                 }
6611         };
6612
6613         update_fulfill_msg.htlc_id = 1;
6614
6615         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
6616
6617         assert!(nodes[0].node.list_channels().is_empty());
6618         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6619         assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
6620         check_added_monitors!(nodes[0], 1);
6621         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6622 }
6623
6624 #[test]
6625 fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
6626         //BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel.
6627
6628         let chanmon_cfgs = create_chanmon_cfgs(2);
6629         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6630         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6631         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6632         create_announced_chan_between_nodes(&nodes, 0, 1);
6633
6634         let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
6635
6636         nodes[1].node.claim_funds(our_payment_preimage);
6637         check_added_monitors!(nodes[1], 1);
6638         expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);
6639
6640         let events = nodes[1].node.get_and_clear_pending_msg_events();
6641         assert_eq!(events.len(), 1);
6642         let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
6643                 match events[0] {
6644                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6645                                 assert!(update_add_htlcs.is_empty());
6646                                 assert_eq!(update_fulfill_htlcs.len(), 1);
6647                                 assert!(update_fail_htlcs.is_empty());
6648                                 assert!(update_fail_malformed_htlcs.is_empty());
6649                                 assert!(update_fee.is_none());
6650                                 update_fulfill_htlcs[0].clone()
6651                         },
6652                         _ => panic!("Unexpected event"),
6653                 }
6654         };
6655
6656         update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]);
6657
6658         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
6659
6660         assert!(nodes[0].node.list_channels().is_empty());
6661         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6662         assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
6663         check_added_monitors!(nodes[0], 1);
6664         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6665 }
6666
6667 #[test]
6668 fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() {
6669         //BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel.
6670
6671         let chanmon_cfgs = create_chanmon_cfgs(2);
6672         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6673         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6674         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6675         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6676
6677         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6678         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6679                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6680         check_added_monitors!(nodes[0], 1);
6681
6682         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6683         updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
6684
6685         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6686         check_added_monitors!(nodes[1], 0);
6687         commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true);
6688
6689         let events = nodes[1].node.get_and_clear_pending_msg_events();
6690
6691         let mut update_msg: msgs::UpdateFailMalformedHTLC = {
6692                 match events[0] {
6693                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6694                                 assert!(update_add_htlcs.is_empty());
6695                                 assert!(update_fulfill_htlcs.is_empty());
6696                                 assert!(update_fail_htlcs.is_empty());
6697                                 assert_eq!(update_fail_malformed_htlcs.len(), 1);
6698                                 assert!(update_fee.is_none());
6699                                 update_fail_malformed_htlcs[0].clone()
6700                         },
6701                         _ => panic!("Unexpected event"),
6702                 }
6703         };
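             // Clear the BADONION bit (0x8000). BOLT 2 requires it to be set on update_fail_malformed_htlc,
             // so node 0 must fail the channel when it receives this message.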
6704         update_msg.failure_code &= !0x8000;
6705         nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6706
6707         assert!(nodes[0].node.list_channels().is_empty());
6708         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6709         assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
6710         check_added_monitors!(nodes[0], 1);
6711         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 1000000);
6712 }
6713
6714 #[test]
6715 fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_update_fail_htlc() {
6716         //BOLT 2 Requirement: a receiving node which has an outgoing HTLC canceled by update_fail_malformed_htlc:
6717         //    * MUST return an error in the update_fail_htlc sent to the link which originally sent the HTLC, using the failure_code given and setting the data to sha256_of_onion.
6718
6719         let chanmon_cfgs = create_chanmon_cfgs(3);
6720         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6721         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
6722         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6723         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6724         let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000);
6725
6726         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
6727
6728         //First hop
6729         let mut payment_event = {
6730                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6731                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6732                 check_added_monitors!(nodes[0], 1);
6733                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6734                 assert_eq!(events.len(), 1);
6735                 SendEvent::from_event(events.remove(0))
6736         };
6737         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6738         check_added_monitors!(nodes[1], 0);
6739         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6740         expect_pending_htlcs_forwardable!(nodes[1]);
6741         let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
6742         assert_eq!(events_2.len(), 1);
6743         check_added_monitors!(nodes[1], 1);
6744         payment_event = SendEvent::from_event(events_2.remove(0));
6745         assert_eq!(payment_event.msgs.len(), 1);
6746
6747         //Second Hop
6748         payment_event.msgs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
6749         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
6750         check_added_monitors!(nodes[2], 0);
6751         commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
6752
6753         let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
6754         assert_eq!(events_3.len(), 1);
6755         let update_msg : (msgs::UpdateFailMalformedHTLC, msgs::CommitmentSigned) = {
6756                 match events_3[0] {
6757                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
6758                                 assert!(update_add_htlcs.is_empty());
6759                                 assert!(update_fulfill_htlcs.is_empty());
6760                                 assert!(update_fail_htlcs.is_empty());
6761                                 assert_eq!(update_fail_malformed_htlcs.len(), 1);
6762                                 assert!(update_fee.is_none());
6763                                 (update_fail_malformed_htlcs[0].clone(), commitment_signed.clone())
6764                         },
6765                         _ => panic!("Unexpected event"),
6766                 }
6767         };
6768
6769         nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg.0);
6770
6771         check_added_monitors!(nodes[1], 0);
6772         commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
6773         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
6774         let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
6775         assert_eq!(events_4.len(), 1);
6776
6777         //Confirm that handling the update_fail_malformed_htlc message produces an update_fail_htlc message to be forwarded back along the route
6778         match events_4[0] {
6779                 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6780                         assert!(update_add_htlcs.is_empty());
6781                         assert!(update_fulfill_htlcs.is_empty());
6782                         assert_eq!(update_fail_htlcs.len(), 1);
6783                         assert!(update_fail_malformed_htlcs.is_empty());
6784                         assert!(update_fee.is_none());
6785                 },
6786                 _ => panic!("Unexpected event"),
6787         };
6788
6789         check_added_monitors!(nodes[1], 1);
6790 }
6791
6792 #[test]
6793 fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() {
6794         let chanmon_cfgs = create_chanmon_cfgs(3);
6795         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6796         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
6797         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6798         create_announced_chan_between_nodes(&nodes, 0, 1);
6799         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
6800
6801         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100_000);
6802
6803         // First hop
6804         let mut payment_event = {
6805                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6806                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6807                 check_added_monitors!(nodes[0], 1);
6808                 SendEvent::from_node(&nodes[0])
6809         };
6810
6811         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6812         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6813         expect_pending_htlcs_forwardable!(nodes[1]);
6814         check_added_monitors!(nodes[1], 1);
6815         payment_event = SendEvent::from_node(&nodes[1]);
6816         assert_eq!(payment_event.msgs.len(), 1);
6817
6818         // Second Hop
6819         payment_event.msgs[0].onion_routing_packet.version = 1; // Trigger an invalid_onion_version error
6820         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
6821         check_added_monitors!(nodes[2], 0);
6822         commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
6823
6824         let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
6825         assert_eq!(events_3.len(), 1);
6826         match events_3[0] {
6827                 MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
6828                         let mut update_msg = updates.update_fail_malformed_htlcs[0].clone();
6829                         // Set the NODE bit (BADONION and PERM already set in invalid_onion_version error)
6830                         update_msg.failure_code |= 0x2000;
6831
6832                         nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg);
6833                         commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true);
6834                 },
6835                 _ => panic!("Unexpected event"),
6836         }
6837
6838         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
6839                 vec![HTLCDestination::NextHopChannel {
6840                         node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
6841         let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
6842         assert_eq!(events_4.len(), 1);
6843         check_added_monitors!(nodes[1], 1);
6844
6845         match events_4[0] {
6846                 MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
6847                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
6848                         commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
6849                 },
6850                 _ => panic!("Unexpected event"),
6851         }
6852
6853         let events_5 = nodes[0].node.get_and_clear_pending_events();
6854         assert_eq!(events_5.len(), 2);
6855
6856         // Expect a PaymentPathFailed event with a ChannelFailure network update for the channel between
6857         // the node originating the error to its next hop.
6858         match events_5[0] {
6859                 Event::PaymentPathFailed { error_code, failure: PathFailure::OnPath { network_update: Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent }) }, ..
6860                 } => {
6861                         assert_eq!(short_channel_id, chan_2.0.contents.short_channel_id);
6862                         assert!(is_permanent);
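                             // 0x8000|0x4000|0x2000|4 is BADONION|PERM|NODE|invalid_onion_version: the code
                             // node 2 generated above plus the NODE bit we set before forwarding the failure.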
6863                         assert_eq!(error_code, Some(0x8000|0x4000|0x2000|4));
6864                 },
6865                 _ => panic!("Unexpected event"),
6866         }
6867         match events_5[1] {
6868                 Event::PaymentFailed { payment_hash, .. } => {
6869                         assert_eq!(payment_hash, our_payment_hash);
6870                 },
6871                 _ => panic!("Unexpected event"),
6872         }
6873
6874         // TODO: Test actual removal of channel from NetworkGraph when it's implemented.
6875 }
6876
6877 fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
6878         // Dust-HTLC failure updates must be delayed until the failure-trigger tx (in this case the local commitment) reaches ANTI_REORG_DELAY
6879         // We can have at most two valid local commitment txs, so both cases must be covered, and both txs must be checked to get them all, as an
6880         // HTLC could have been removed from the latest local commitment tx but still be valid until we get the remote RAA
6881
6882         let mut chanmon_cfgs = create_chanmon_cfgs(2);
6883         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
6884         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6885         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6886         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6887         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6888
6889         let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
6890                 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
6891
6892         // We route 2 dust-HTLCs between A and B
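             // Both bs_dust_limit*1000 msat HTLCs are dust (no corresponding output on the broadcast
             // commitment transaction), so their failure is driven purely by the commitment tx confirming
             // plus ANTI_REORG_DELAY, which is what this test exercises.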
6893         let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
6894         let (_, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
6895         route_payment(&nodes[0], &[&nodes[1]], 1000000);
6896
6897         // Cache one local commitment tx as previous
6898         let as_prev_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
6899
6900         // Fail one HTLC to prune it in the will-be-latest-local commitment tx
6901         nodes[1].node.fail_htlc_backwards(&payment_hash_2);
6902         check_added_monitors!(nodes[1], 0);
6903         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
6904         check_added_monitors!(nodes[1], 1);
6905
6906         let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6907         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &remove.update_fail_htlcs[0]);
6908         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &remove.commitment_signed);
6909         check_added_monitors!(nodes[0], 1);
6910
6911         // Cache one local commitment tx as latest
6912         let as_last_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
6913
6914         let events = nodes[0].node.get_and_clear_pending_msg_events();
6915         match events[0] {
6916                 MessageSendEvent::SendRevokeAndACK { node_id, .. } => {
6917                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
6918                 },
6919                 _ => panic!("Unexpected event"),
6920         }
6921         match events[1] {
6922                 MessageSendEvent::UpdateHTLCs { node_id, .. } => {
6923                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
6924                 },
6925                 _ => panic!("Unexpected event"),
6926         }
6927
6928         assert_ne!(as_prev_commitment_tx, as_last_commitment_tx);
6929         // Fail the 2 dust-HTLCs, moving their failure into the maturation buffer (htlc_updated_waiting_threshold_conf)
6930         if announce_latest {
6931                 mine_transaction(&nodes[0], &as_last_commitment_tx[0]);
6932         } else {
6933                 mine_transaction(&nodes[0], &as_prev_commitment_tx[0]);
6934         }
6935
6936         check_closed_broadcast!(nodes[0], true);
6937         check_added_monitors!(nodes[0], 1);
6938         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
6939
6940         assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
6941         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
6942         let events = nodes[0].node.get_and_clear_pending_events();
6943         // Only 2 PaymentPathFailed events should show up; the over-dust HTLC has to be failed by the timeout tx
6944         assert_eq!(events.len(), 4);
6945         let mut first_failed = false;
6946         for event in events {
6947                 match event {
6948                         Event::PaymentPathFailed { payment_hash, .. } => {
6949                                 if payment_hash == payment_hash_1 {
6950                                         assert!(!first_failed);
6951                                         first_failed = true;
6952                                 } else {
6953                                         assert_eq!(payment_hash, payment_hash_2);
6954                                 }
6955                         },
6956                         Event::PaymentFailed { .. } => {}
6957                         _ => panic!("Unexpected event"),
6958                 }
6959         }
6960 }
6961
6962 #[test]
6963 fn test_failure_delay_dust_htlc_local_commitment() {
6964         do_test_failure_delay_dust_htlc_local_commitment(true);
6965         do_test_failure_delay_dust_htlc_local_commitment(false);
6966 }
6967
6968 fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
6969         // Outbound HTLC-failure updates must be cancelled if we get a reorg before we reach ANTI_REORG_DELAY.
6970         // Broadcast of revoked remote commitment tx, trigger failure-update of dust/non-dust HTLCs
6971         // Broadcast of remote commitment tx, trigger failure-update of dust-HTLCs
6972         // Broadcast of timeout tx on remote commitment tx, trigger failure-update of non-dust HTLCs
6973         // Broadcast of local commitment tx, trigger failure-update of dust-HTLCs
6974         // Broadcast of HTLC-timeout tx on local commitment tx, trigger failure-update of non-dust HTLCs
6975
6976         let chanmon_cfgs = create_chanmon_cfgs(3);
6977         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6978         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
6979         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6980         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6981
6982         let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
6983                 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
6984
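             // dust_hash rides a dust HTLC with no on-chain output; non_dust_hash is a 1_000_000 msat HTLC
             // that does have an output and therefore needs a timeout/claim tx to be failed on-chain.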
6985         let (_payment_preimage_1, dust_hash, _payment_secret_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
6986         let (_payment_preimage_2, non_dust_hash, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
6987
6988         let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
6989         let bs_commitment_tx = get_local_commitment_txn!(nodes[1], chan.2);
6990
6991         // We revoked bs_commitment_tx
6992         if revoked {
6993                 let (payment_preimage_3, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
6994                 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
6995         }
6996
6997         let mut timeout_tx = Vec::new();
6998         if local {
6999                 // We fail dust-HTLC 1 by broadcast of local commitment tx
7000                 mine_transaction(&nodes[0], &as_commitment_tx[0]);
7001                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
7002                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7003                 expect_payment_failed!(nodes[0], dust_hash, false);
7004
7005                 connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - ANTI_REORG_DELAY);
7006                 check_closed_broadcast!(nodes[0], true);
7007                 check_added_monitors!(nodes[0], 1);
7008                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7009                 timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
7010                 assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
7011                 // We fail non-dust-HTLC 2 by broadcast of local HTLC-timeout tx on local commitment tx
7012                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7013                 mine_transaction(&nodes[0], &timeout_tx[0]);
7014                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7015                 expect_payment_failed!(nodes[0], non_dust_hash, false);
7016         } else {
7017                 // We fail dust-HTLC 1 by broadcast of remote commitment tx. If revoked, fail also non-dust HTLC
7018                 mine_transaction(&nodes[0], &bs_commitment_tx[0]);
7019                 check_closed_broadcast!(nodes[0], true);
7020                 check_added_monitors!(nodes[0], 1);
7021                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
7022                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7023
7024                 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
7025                 timeout_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().drain(..)
7026                         .filter(|tx| tx.input[0].previous_output.txid == bs_commitment_tx[0].txid()).collect();
7027                 check_spends!(timeout_tx[0], bs_commitment_tx[0]);
7028                 // For both revoked and non-revoked commitment transactions, after ANTI_REORG_DELAY the
7029                 // dust HTLC should have been failed.
7030                 expect_payment_failed!(nodes[0], dust_hash, false);
7031
7032                 if !revoked {
7033                         assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
7034                 } else {
7035                         assert_eq!(timeout_tx[0].lock_time.0, 11);
7036                 }
7037                 // We fail non-dust-HTLC 2 by broadcast of local timeout/revocation-claim tx
7038                 mine_transaction(&nodes[0], &timeout_tx[0]);
7039                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7040                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7041                 expect_payment_failed!(nodes[0], non_dust_hash, false);
7042         }
7043 }
7044
7045 #[test]
7046 fn test_sweep_outbound_htlc_failure_update() {
7047         do_test_sweep_outbound_htlc_failure_update(false, true);
7048         do_test_sweep_outbound_htlc_failure_update(false, false);
7049         do_test_sweep_outbound_htlc_failure_update(true, false);
7050 }
7051
7052 #[test]
7053 fn test_user_configurable_csv_delay() {
7054         // We test that our channel constructors yield errors when we pass them an absurd CSV delay
7055
7056         let mut low_our_to_self_config = UserConfig::default();
7057         low_our_to_self_config.channel_handshake_config.our_to_self_delay = 6;
7058         let mut high_their_to_self_config = UserConfig::default();
7059         high_their_to_self_config.channel_handshake_limits.their_to_self_delay = 100;
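             // our_to_self_delay = 6 is below BREAKDOWN_TIMEOUT, and the 100-block their_to_self_delay
             // limit is below the 200-block delay we request later, so each check below has something to reject.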
7060         let user_cfgs = [Some(high_their_to_self_config.clone()), None];
7061         let chanmon_cfgs = create_chanmon_cfgs(2);
7062         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7063         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
7064         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7065
7066         // We test that config.our_to_self_delay >= BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new()
7067         if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7068                 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0,
7069                 &low_our_to_self_config, 0, 42)
7070         {
7071                 match error {
7072                         APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
7073                         _ => panic!("Unexpected event"),
7074                 }
7075         } else { assert!(false) }
7076
7077         // We test that config.our_to_self_delay > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
7078         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
7079         let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
7080         open_channel.to_self_delay = 200;
7081         if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7082                 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
7083                 &low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
7084         {
7085                 match error {
7086                         ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str()));  },
7087                         _ => panic!("Unexpected event"),
7088                 }
7089         } else { assert!(false); }
7090
7091         // We test that msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::accept_channel()
7092         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
7093         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
7094         let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
7095         accept_channel.to_self_delay = 200;
7096         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
7097         let reason_msg;
7098         if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
7099                 match action {
7100                         &ErrorAction::SendErrorMessage { ref msg } => {
7101                                 assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str()));
7102                                 reason_msg = msg.data.clone();
7103                         },
7104                         _ => { panic!(); }
7105                 }
7106         } else { panic!(); }
7107         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }, [nodes[1].node.get_our_node_id()], 1000000);
7108
7109         // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
7110         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
7111         let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
7112         open_channel.to_self_delay = 200;
7113         if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7114                 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
7115                 &high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
7116         {
7117                 match error {
7118                         ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); },
7119                         _ => panic!("Unexpected event"),
7120                 }
7121         } else { assert!(false); }
7122 }
7123
7124 #[test]
7125 fn test_check_htlc_underpaying() {
7126         // Send a payment through A -> B, but A is maliciously
7127         // sending a probe payment (i.e. less than the expected value)
7128         // to B; B should refuse the payment.
7129
7130         let chanmon_cfgs = create_chanmon_cfgs(2);
7131         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7132         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7133         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7134
7135         // Create some initial channels
7136         create_announced_chan_between_nodes(&nodes, 0, 1);
7137
7138         let scorer = test_utils::TestScorer::new();
7139         let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7140         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV).with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
7141         let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(), None, 10_000, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
7142         let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]);
7143         let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None).unwrap();
7144         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
7145                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
7146         check_added_monitors!(nodes[0], 1);
7147
7148         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
7149         assert_eq!(events.len(), 1);
7150         let mut payment_event = SendEvent::from_event(events.pop().unwrap());
7151         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
7152         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
7153
7154         // Note that we first have to wait a random delay before processing the receipt of the HTLC,
7155         // and then will wait a second random delay before failing the HTLC back:
7156         expect_pending_htlcs_forwardable!(nodes[1]);
7157         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
7158
7159         // nodes[1] is expecting a payment of 100_000 but received 10_000,
7160         // so it should fail the HTLC as if we didn't know the preimage.
7161         nodes[1].node.process_pending_htlc_forwards();
7162
7163         let events = nodes[1].node.get_and_clear_pending_msg_events();
7164         assert_eq!(events.len(), 1);
7165         let (update_fail_htlc, commitment_signed) = match events[0] {
7166                 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
7167                         assert!(update_add_htlcs.is_empty());
7168                         assert!(update_fulfill_htlcs.is_empty());
7169                         assert_eq!(update_fail_htlcs.len(), 1);
7170                         assert!(update_fail_malformed_htlcs.is_empty());
7171                         assert!(update_fee.is_none());
7172                         (update_fail_htlcs[0].clone(), commitment_signed)
7173                 },
7174                 _ => panic!("Unexpected event"),
7175         };
7176         check_added_monitors!(nodes[1], 1);
7177
7178         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlc);
7179         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
7180
7181         // 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32
7182         let mut expected_failure_data = (10_000 as u64).to_be_bytes().to_vec();
7183         expected_failure_data.extend_from_slice(&CHAN_CONFIRM_DEPTH.to_be_bytes());
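	// 0x4000|15 is PERM|incorrect_or_unknown_payment_details; per BOLT 4 its failure data carries
	// the incoming HTLC amount and the current block height, which is what we assert on below.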
7184         expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000|15, &expected_failure_data[..]);
7185 }
7186
7187 #[test]
7188 fn test_announce_disable_channels() {
7189         // Create 3 channels between A and B. Disconnect B. Call timer_tick_occurred and check for the generated
7190         // ChannelUpdates disabling the channels. Reconnect B, reestablish, and check that no ChannelUpdate is
7191         // generated until enough ticks have passed to re-enable the channels.
7191
7192         let chanmon_cfgs = create_chanmon_cfgs(2);
7193         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7194         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7195         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7196
7197         create_announced_chan_between_nodes(&nodes, 0, 1);
7198         create_announced_chan_between_nodes(&nodes, 1, 0);
7199         create_announced_chan_between_nodes(&nodes, 0, 1);
7200
7201         // Disconnect peers
7202         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
7203         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
7204
7205         for _ in 0..DISABLE_GOSSIP_TICKS + 1 {
7206                 nodes[0].node.timer_tick_occurred();
7207         }
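	// Only after the peer has been disconnected for more than DISABLE_GOSSIP_TICKS ticks should we
	// broadcast ChannelUpdates with the "disabled" bit set, one per channel.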
7208         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
7209         assert_eq!(msg_events.len(), 3);
7210         let mut chans_disabled = HashMap::new();
7211         for e in msg_events {
7212                 match e {
7213                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
7214                                 assert_eq!(msg.contents.flags & (1<<1), 1<<1); // The "channel disabled" bit should be set
7215                                 // Check that each channel gets updated exactly once
7216                                 if chans_disabled.insert(msg.contents.short_channel_id, msg.contents.timestamp).is_some() {
7217                                         panic!("Generated ChannelUpdate for wrong chan!");
7218                                 }
7219                         },
7220                         _ => panic!("Unexpected event"),
7221                 }
7222         }
7223         // Reconnect peers
7224         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
7225                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
7226         }, true).unwrap();
7227         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
7228         assert_eq!(reestablish_1.len(), 3);
7229         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
7230                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
7231         }, false).unwrap();
7232         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
7233         assert_eq!(reestablish_2.len(), 3);
7234
7235         // Reestablish chan_1
7236         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
7237         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7238         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
7239         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7240         // Reestablish chan_2
7241         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[1]);
7242         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7243         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[1]);
7244         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7245         // Reestablish chan_3
7246         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[2]);
7247         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7248         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[2]);
7249         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7250
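	// After reconnecting, no ChannelUpdate should be generated during the first ENABLE_GOSSIP_TICKS
	// ticks; the tick after that should broadcast fresh updates with the "disabled" bit cleared.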
7251         for _ in 0..ENABLE_GOSSIP_TICKS {
7252                 nodes[0].node.timer_tick_occurred();
7253         }
7254         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7255         nodes[0].node.timer_tick_occurred();
7256         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
7257         assert_eq!(msg_events.len(), 3);
7258         for e in msg_events {
7259                 match e {
7260                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
7261                                 assert_eq!(msg.contents.flags & (1<<1), 0); // The "channel disabled" bit should be off
7262                                 match chans_disabled.remove(&msg.contents.short_channel_id) {
7263                                         // Each update should have a higher timestamp than the previous one, replacing
7264                                         // the old one.
7265                                         Some(prev_timestamp) => assert!(msg.contents.timestamp > prev_timestamp),
7266                                         None => panic!("Generated ChannelUpdate for wrong chan!"),
7267                                 }
7268                         },
7269                         _ => panic!("Unexpected event"),
7270                 }
7271         }
7272         // Check that each channel gets updated exactly once
7273         assert!(chans_disabled.is_empty());
7274 }
7275
7276 #[test]
7277 fn test_bump_penalty_txn_on_revoked_commitment() {
7278         // If penalty txn have feerates too low to get into mempools, RBF-bump them to be sure
7279         // we're able to claim outputs on the revoked commitment transaction before the timelocks expire
7280
7281         let chanmon_cfgs = create_chanmon_cfgs(2);
7282         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7283         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7284         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7285
7286         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7287
7288         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
7289         let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 30)
7290                 .with_bolt11_features(nodes[0].node.invoice_features()).unwrap();
7291         let (route,_, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_params, 3000000);
7292         send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000);
7293
7294         let revoked_txn = get_local_commitment_txn!(nodes[0], chan.2);
7295         // Revoked commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
7296         assert_eq!(revoked_txn[0].output.len(), 4);
7297         assert_eq!(revoked_txn[0].input.len(), 1);
7298         assert_eq!(revoked_txn[0].input[0].previous_output.txid, chan.3.txid());
7299         let revoked_txid = revoked_txn[0].txid();
7300
7301         let mut penalty_sum = 0;
7302         for outp in revoked_txn[0].output.iter() {
7303                 if outp.script_pubkey.is_v0_p2wsh() {
7304                         penalty_sum += outp.value;
7305                 }
7306         }
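	// Throughout this test, feerates are computed as (claimed input value - output value) * 1000 / weight,
	// i.e. sats per kilo-weight-unit, and each RBF bump is expected to raise the feerate by at least 25%
	// (see the assertions below).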
7307
7308         // Connect blocks to change the height_timer range to see if we use the right soonest_timelock
7309         let header_114 = connect_blocks(&nodes[1], 14);
7310
7311         // Actually revoke tx by claiming a HTLC
7312         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
7313         connect_block(&nodes[1], &create_dummy_block(header_114, 42, vec![revoked_txn[0].clone()]));
7314         check_added_monitors!(nodes[1], 1);
7315
7316         // One or more justice tx should have been broadcast, check it
7317         let penalty_1;
7318         let feerate_1;
7319         {
7320                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7321                 assert_eq!(node_txn.len(), 1); // justice tx (broadcasted from ChannelMonitor)
7322                 assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7323                 assert_eq!(node_txn[0].output.len(), 1);
7324                 check_spends!(node_txn[0], revoked_txn[0]);
7325                 let fee_1 = penalty_sum - node_txn[0].output[0].value;
7326                 feerate_1 = fee_1 * 1000 / node_txn[0].weight() as u64;
7327                 penalty_1 = node_txn[0].txid();
7328                 node_txn.clear();
7329         };
7330
7331         // After exhaustion of height timer, a new bumped justice tx should have been broadcast, check it
7332         connect_blocks(&nodes[1], 15);
7333         let mut penalty_2 = penalty_1;
7334         let mut feerate_2 = 0;
7335         {
7336                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7337                 assert_eq!(node_txn.len(), 1);
7338                 if node_txn[0].input[0].previous_output.txid == revoked_txid {
7339                         assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7340                         assert_eq!(node_txn[0].output.len(), 1);
7341                         check_spends!(node_txn[0], revoked_txn[0]);
7342                         penalty_2 = node_txn[0].txid();
7343                         // Verify the new bumped tx is different from the last claiming transaction, we don't want a spurious rebroadcast
7344                         assert_ne!(penalty_2, penalty_1);
7345                         let fee_2 = penalty_sum - node_txn[0].output[0].value;
7346                         feerate_2 = fee_2 * 1000 / node_txn[0].weight() as u64;
7347                         // Verify 25% bump heuristic
7348                         assert!(feerate_2 * 100 >= feerate_1 * 125);
7349                         node_txn.clear();
7350                 }
7351         }
7352         assert_ne!(feerate_2, 0);
7353
7354         // After exhaustion of height timer for a 2nd time, a new bumped justice tx should have been broadcast, check it
7355         connect_blocks(&nodes[1], 1);
7356         let penalty_3;
7357         let mut feerate_3 = 0;
7358         {
7359                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7360                 assert_eq!(node_txn.len(), 1);
7361                 if node_txn[0].input[0].previous_output.txid == revoked_txid {
7362                         assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7363                         assert_eq!(node_txn[0].output.len(), 1);
7364                         check_spends!(node_txn[0], revoked_txn[0]);
7365                         penalty_3 = node_txn[0].txid();
7366                         // Verify the new bumped tx is different from the last claiming transaction, we don't want a spurious rebroadcast
7367                         assert_ne!(penalty_3, penalty_2);
7368                         let fee_3 = penalty_sum - node_txn[0].output[0].value;
7369                         feerate_3 = fee_3 * 1000 / node_txn[0].weight() as u64;
7370                         // Verify 25% bump heuristic
7371                         assert!(feerate_3 * 100 >= feerate_2 * 125);
7372                         node_txn.clear();
7373                 }
7374         }
7375         assert_ne!(feerate_3, 0);
7376
7377         nodes[1].node.get_and_clear_pending_events();
7378         nodes[1].node.get_and_clear_pending_msg_events();
7379 }
7380
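// A minimal illustrative sketch (not used by the tests in this file): the feerate math that the
// penalty-bump assertions above and below rely on. The names are hypothetical and exist only for
// this example; `prev_value_sat` is the total value of the inputs being claimed.
#[allow(dead_code)]
fn example_feerate_and_bump_check(prev_value_sat: u64, claim_output_sat: u64, claim_weight: u64, prev_feerate: u64) -> bool {
	// Feerate in sats per kilo-weight-unit, as computed by the assertions in these tests.
	let fee = prev_value_sat - claim_output_sat;
	let feerate = fee * 1000 / claim_weight;
	// The RBF bump heuristic checked by the tests: each new claim must pay at least a 25% higher feerate.
	feerate * 100 >= prev_feerate * 125
}
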
7381 #[test]
7382 fn test_bump_penalty_txn_on_revoked_htlcs() {
7383         // If penalty txn have feerates too low to get into mempools, RBF-bump them to be sure
7384         // we're able to claim outputs on revoked HTLC transactions before the timelocks expire
7385
7386         let mut chanmon_cfgs = create_chanmon_cfgs(2);
7387         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
7388         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7389         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7390         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7391
7392         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7393         // Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps)
7394         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 50).with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
7395         let scorer = test_utils::TestScorer::new();
7396         let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7397         let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(), None,
7398                 3_000_000, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
7399         let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0;
7400         let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 50).with_bolt11_features(nodes[0].node.invoice_features()).unwrap();
7401         let route = get_route(&nodes[1].node.get_our_node_id(), &payment_params, &nodes[1].network_graph.read_only(), None,
7402                 3_000_000, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
7403         send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000);
7404
7405         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
7406         assert_eq!(revoked_local_txn[0].input.len(), 1);
7407         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
7408
7409         // Revoke local commitment tx
7410         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
7411
7412         // B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx
7413         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()]));
7414         check_closed_broadcast!(nodes[1], true);
7415         check_added_monitors!(nodes[1], 1);
7416         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
7417         connect_blocks(&nodes[1], 50); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
7418
7419         let revoked_htlc_txn = {
7420                 let txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
7421                 assert_eq!(txn.len(), 2);
7422
7423                 assert_eq!(txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
7424                 assert_eq!(txn[0].input.len(), 1);
7425                 check_spends!(txn[0], revoked_local_txn[0]);
7426
7427                 assert_eq!(txn[1].input.len(), 1);
7428                 assert_eq!(txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
7429                 assert_eq!(txn[1].output.len(), 1);
7430                 check_spends!(txn[1], revoked_local_txn[0]);
7431
7432                 txn
7433         };
7434
7435         // Broadcast set of revoked txn on A
7436         let hash_128 = connect_blocks(&nodes[0], 40);
7437         let block_11 = create_dummy_block(hash_128, 42, vec![revoked_local_txn[0].clone()]);
7438         connect_block(&nodes[0], &block_11);
7439         let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]);
7440         connect_block(&nodes[0], &block_129);
7441         let events = nodes[0].node.get_and_clear_pending_events();
7442         expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
7443         match events.last().unwrap() {
7444                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
7445                 _ => panic!("Unexpected event"),
7446         }
7447         let first;
7448         let feerate_1;
7449         let penalty_txn;
7450         {
7451                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7452                 assert_eq!(node_txn.len(), 4); // 3 penalty txn on revoked commitment tx + 1 penalty txn on revoked HTLC txn
7453                 // Verify claim txn are spending revoked HTLC txn
7454
7455                 // node_txn 0-2 each spend a separate revoked output from revoked_local_txn[0]
7456                 // Note that node_txn[0] and node_txn[1] are bogus - they double spend the revoked_htlc_txn
7457                 // which are included in the same block (they are broadcasted because we scan the
7458                 // transactions linearly and generate claims as we go, they likely should be removed in the
7459                 // future).
7460                 assert_eq!(node_txn[0].input.len(), 1);
7461                 check_spends!(node_txn[0], revoked_local_txn[0]);
7462                 assert_eq!(node_txn[1].input.len(), 1);
7463                 check_spends!(node_txn[1], revoked_local_txn[0]);
7464                 assert_eq!(node_txn[2].input.len(), 1);
7465                 check_spends!(node_txn[2], revoked_local_txn[0]);
7466
7467                 // Each of the three justice transactions claim a separate (single) output of the three
7468                 // available, which we check here:
7469                 assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
7470                 assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
7471                 assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
7472
7473                 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output);
7474                 assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
7475
7476                 // node_txn[3] spends the revoked outputs from the revoked_htlc_txn (which only have one
7477                 // output, checked above).
7478                 assert_eq!(node_txn[3].input.len(), 2);
7479                 assert_eq!(node_txn[3].output.len(), 1);
7480                 check_spends!(node_txn[3], revoked_htlc_txn[0], revoked_htlc_txn[1]);
7481
7482                 first = node_txn[3].txid();
7483                 // Store both feerates for later comparison
7484                 let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[3].output[0].value;
7485                 feerate_1 = fee_1 * 1000 / node_txn[3].weight() as u64;
7486                 penalty_txn = vec![node_txn[2].clone()];
7487                 node_txn.clear();
7488         }
7489
7490         // Connect one more block to see if bumped penalty are issued for HTLC txn
7491         let block_130 = create_dummy_block(block_129.block_hash(), 42, penalty_txn);
7492         connect_block(&nodes[0], &block_130);
7493         let block_131 = create_dummy_block(block_130.block_hash(), 42, Vec::new());
7494         connect_block(&nodes[0], &block_131);
7495
7496         // Few more blocks to confirm penalty txn
7497         connect_blocks(&nodes[0], 4);
7498         assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
7499         let header_144 = connect_blocks(&nodes[0], 9);
7500         let node_txn = {
7501                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7502                 assert_eq!(node_txn.len(), 1);
7503
7504                 assert_eq!(node_txn[0].input.len(), 2);
7505                 check_spends!(node_txn[0], revoked_htlc_txn[0], revoked_htlc_txn[1]);
7506                 // Verify bumped tx is different and 25% bump heuristic
7507                 assert_ne!(first, node_txn[0].txid());
7508                 let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[0].output[0].value;
7509                 let feerate_2 = fee_2 * 1000 / node_txn[0].weight() as u64;
7510                 assert!(feerate_2 * 100 > feerate_1 * 125);
7511                 let txn = vec![node_txn[0].clone()];
7512                 node_txn.clear();
7513                 txn
7514         };
7515         // Broadcast claim txn and confirm blocks to avoid further bumps on these outputs
7516         connect_block(&nodes[0], &create_dummy_block(header_144, 42, node_txn));
7517         connect_blocks(&nodes[0], 20);
7518         {
7519                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7520                 // We verify that no new transaction has been broadcast because previously
7521                 // we were buggy on this exact behavior by not tracking remote HTLC outputs for monitoring (see #411),
7522                 // which meant we wouldn't see them spent by a justice tx, and bumped justice txn
7523                 // were generated forever instead of being safely cleaned up after confirmation plus ANTI_REORG_DELAY blocks.
7524                 // Spending the revoked HTLC outputs via our claiming transaction should remove the claim request as
7525                 // expected and dry up bumped justice generation.
7526                 assert_eq!(node_txn.len(), 0);
7527                 node_txn.clear();
7528         }
7529         check_closed_broadcast!(nodes[0], true);
7530         check_added_monitors!(nodes[0], 1);
7531 }
7532
7533 #[test]
7534 fn test_bump_penalty_txn_on_remote_commitment() {
7535         // If claim txn have feerates too low to get into mempools, RBF-bump them to be sure
7536         // we're able to claim outputs on the remote commitment transaction before the timelocks expire
7537
7538         // Create 2 HTLCs
7539         // Provide preimage for one
7540         // Check aggregation
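	// Rough outline: B learns the preimage for one HTLC, A's commitment tx confirms on B's chain,
	// and B should sweep both the preimage output and the (expired) timeout output, RBF-bumping
	// each claim until it confirms.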
7541
7542         let chanmon_cfgs = create_chanmon_cfgs(2);
7543         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7544         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7545         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7546
7547         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7548         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
7549         route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
7550
7551         // Remote commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
7552         let remote_txn = get_local_commitment_txn!(nodes[0], chan.2);
7553         assert_eq!(remote_txn[0].output.len(), 4);
7554         assert_eq!(remote_txn[0].input.len(), 1);
7555         assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());
7556
7557         // Claim a HTLC without revocation (provide B monitor with preimage)
7558         nodes[1].node.claim_funds(payment_preimage);
7559         expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
7560         mine_transaction(&nodes[1], &remote_txn[0]);
7561         check_added_monitors!(nodes[1], 2);
7562         connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
7563
7564         // One or more claim tx should have been broadcast, check it
7565         let timeout;
7566         let preimage;
7567         let preimage_bump;
7568         let feerate_timeout;
7569         let feerate_preimage;
7570         {
7571                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7572                 // 3 transactions including:
7573                 //   preimage and timeout sweeps from remote commitment + preimage sweep bump
7574                 assert_eq!(node_txn.len(), 3);
7575                 assert_eq!(node_txn[0].input.len(), 1);
7576                 assert_eq!(node_txn[1].input.len(), 1);
7577                 assert_eq!(node_txn[2].input.len(), 1);
7578                 check_spends!(node_txn[0], remote_txn[0]);
7579                 check_spends!(node_txn[1], remote_txn[0]);
7580                 check_spends!(node_txn[2], remote_txn[0]);
7581
7582                 preimage = node_txn[0].txid();
7583                 let index = node_txn[0].input[0].previous_output.vout;
7584                 let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
7585                 feerate_preimage = fee * 1000 / node_txn[0].weight() as u64;
7586
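		// node_txn[0] is the preimage claim; whichever of the other two spends the same outpoint
		// is its RBF bump, and the remaining transaction is the timeout claim.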
7587                 let (preimage_bump_tx, timeout_tx) = if node_txn[2].input[0].previous_output == node_txn[0].input[0].previous_output {
7588                         (node_txn[2].clone(), node_txn[1].clone())
7589                 } else {
7590                         (node_txn[1].clone(), node_txn[2].clone())
7591                 };
7592
7593                 preimage_bump = preimage_bump_tx;
7594                 check_spends!(preimage_bump, remote_txn[0]);
7595                 assert_eq!(node_txn[0].input[0].previous_output, preimage_bump.input[0].previous_output);
7596
7597                 timeout = timeout_tx.txid();
7598                 let index = timeout_tx.input[0].previous_output.vout;
7599                 let fee = remote_txn[0].output[index as usize].value - timeout_tx.output[0].value;
7600                 feerate_timeout = fee * 1000 / timeout_tx.weight() as u64;
7601
7602                 node_txn.clear();
7603         };
7604         assert_ne!(feerate_timeout, 0);
7605         assert_ne!(feerate_preimage, 0);
7606
7607         // After exhaustion of height timer, new bumped claim txn should have been broadcast, check it
7608         connect_blocks(&nodes[1], 1);
7609         {
7610                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7611                 assert_eq!(node_txn.len(), 1);
7612                 assert_eq!(node_txn[0].input.len(), 1);
7613                 assert_eq!(preimage_bump.input.len(), 1);
7614                 check_spends!(node_txn[0], remote_txn[0]);
7615                 check_spends!(preimage_bump, remote_txn[0]);
7616
7617                 let index = preimage_bump.input[0].previous_output.vout;
7618                 let fee = remote_txn[0].output[index as usize].value - preimage_bump.output[0].value;
7619                 let new_feerate = fee * 1000 / preimage_bump.weight() as u64;
7620                 assert!(new_feerate * 100 > feerate_timeout * 125);
7621                 assert_ne!(timeout, preimage_bump.txid());
7622
7623                 let index = node_txn[0].input[0].previous_output.vout;
7624                 let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
7625                 let new_feerate = fee * 1000 / node_txn[0].weight() as u64;
7626                 assert!(new_feerate * 100 > feerate_preimage * 125);
7627                 assert_ne!(preimage, node_txn[0].txid());
7628
7629                 node_txn.clear();
7630         }
7631
7632         nodes[1].node.get_and_clear_pending_events();
7633         nodes[1].node.get_and_clear_pending_msg_events();
7634 }
7635
7636 #[test]
7637 fn test_counterparty_raa_skip_no_crash() {
7638         // Previously, if our counterparty sent two RAAs in a row without us having provided a
7639         // commitment transaction, we would have happily carried on and provided them the next
7640         // commitment transaction based on one RAA forward. This would probably eventually have led to
7641         // channel closure, but it would not have resulted in funds loss. Still, our
7642         // EnforcingSigner would have panicked as it doesn't like jumps into the future. Here, we
7643         // check simply that the channel is closed in response to such an RAA, but don't check whether
7644         // we decide to punish our counterparty for revoking their funds (as we don't currently
7645         // implement that).
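	// Note that commitment numbers count down from 2^48 - 1 and per-commitment secrets must be
	// released in order, which is why the signer below releases each intermediate secret before
	// deriving the next per-commitment point.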
7646         let chanmon_cfgs = create_chanmon_cfgs(2);
7647         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7648         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7649         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7650         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
7651
7652         let per_commitment_secret;
7653         let next_per_commitment_point;
7654         {
7655                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
7656                 let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
7657                 let keys = guard.channel_by_id.get_mut(&channel_id).unwrap().get_signer();
7658
7659                 const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
7660
7661                 // Make signer believe we got a counterparty signature, so that it allows the revocation
7662                 keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
7663                 per_commitment_secret = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
7664
7665                 // Must revoke without gaps
7666                 keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
7667                 keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
7668
7669                 keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
7670                 next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
7671                         &SecretKey::from_slice(&keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
7672         }
7673
7674         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
7675                 &msgs::RevokeAndACK {
7676                         channel_id,
7677                         per_commitment_secret,
7678                         next_per_commitment_point,
7679                         #[cfg(taproot)]
7680                         next_local_nonce: None,
7681                 });
7682         assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
7683         check_added_monitors!(nodes[1], 1);
7684         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() }
7685                 , [nodes[0].node.get_our_node_id()], 100000);
7686 }
7687
7688 #[test]
7689 fn test_bump_txn_sanitize_tracking_maps() {
7690         // Sanitizing pending_claim_requests and claimable_outpoints used to be buggy;
7691         // verify we clean them right after expiration of ANTI_REORG_DELAY.
7692
7693         let chanmon_cfgs = create_chanmon_cfgs(2);
7694         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7695         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7696         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7697
7698         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7699         // Lock HTLC in both directions
7700         let (payment_preimage_1, _, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
7701         let (_, payment_hash_2, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
7702
7703         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
7704         assert_eq!(revoked_local_txn[0].input.len(), 1);
7705         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
7706
7707         // Revoke local commitment tx
7708         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
7709
7710         // Broadcast set of revoked txn on A
7711         connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH);
7712         expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
7713         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
7714
7715         mine_transaction(&nodes[0], &revoked_local_txn[0]);
7716         check_closed_broadcast!(nodes[0], true);
7717         check_added_monitors!(nodes[0], 1);
7718         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000);
7719         let penalty_txn = {
7720                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7721                 assert_eq!(node_txn.len(), 3); //ChannelMonitor: justice txn * 3
7722                 check_spends!(node_txn[0], revoked_local_txn[0]);
7723                 check_spends!(node_txn[1], revoked_local_txn[0]);
7724                 check_spends!(node_txn[2], revoked_local_txn[0]);
7725                 let penalty_txn = vec![node_txn[0].clone(), node_txn[1].clone(), node_txn[2].clone()];
7726                 node_txn.clear();
7727                 penalty_txn
7728         };
7729         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, penalty_txn));
7730         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7731         {
7732                 let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(OutPoint { txid: chan.3.txid(), index: 0 }).unwrap();
7733                 assert!(monitor.inner.lock().unwrap().onchain_tx_handler.pending_claim_requests.is_empty());
7734                 assert!(monitor.inner.lock().unwrap().onchain_tx_handler.claimable_outpoints.is_empty());
7735         }
7736 }
7737
7738 #[test]
7739 fn test_channel_conf_timeout() {
7740         // Tests that, for inbound channels, we give up on them if the funding transaction does not
7741         // confirm within 2016 blocks, as recommended by BOLT 2.
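	// (2016 blocks is roughly two weeks at one block every ten minutes.)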
7742         let chanmon_cfgs = create_chanmon_cfgs(2);
7743         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7744         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7745         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7746
7747         let _funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 100_000);
7748
7749         // The outbound node should wait forever for confirmation:
7750         // This matches `channel::FUNDING_CONF_DEADLINE_BLOCKS` and BOLT 2's suggested timeout, thus is
7751         // copied here instead of directly referencing the constant.
7752         connect_blocks(&nodes[0], 2016);
7753         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7754
7755         // The inbound node should fail the channel after exactly 2016 blocks
7756         connect_blocks(&nodes[1], 2015);
7757         check_added_monitors!(nodes[1], 0);
7758         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7759
7760         connect_blocks(&nodes[1], 1);
7761         check_added_monitors!(nodes[1], 1);
7762         check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
7763         let close_ev = nodes[1].node.get_and_clear_pending_msg_events();
7764         assert_eq!(close_ev.len(), 1);
7765         match close_ev[0] {
7766                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, ref node_id } => {
7767                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
7768                         assert_eq!(msg.data, "Channel closed because funding transaction failed to confirm within 2016 blocks");
7769                 },
7770                 _ => panic!("Unexpected event"),
7771         }
7772 }
7773
7774 #[test]
7775 fn test_override_channel_config() {
7776         let chanmon_cfgs = create_chanmon_cfgs(2);
7777         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7778         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7779         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7780
7781         // Node0 initiates a channel to node1 using the override config.
7782         let mut override_config = UserConfig::default();
7783         override_config.channel_handshake_config.our_to_self_delay = 200;
7784
7785         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, Some(override_config)).unwrap();
7786
7787         // Assert the channel created by node0 is using the override config.
7788         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7789         assert_eq!(res.channel_flags, 0);
7790         assert_eq!(res.to_self_delay, 200);
7791 }
7792
7793 #[test]
7794 fn test_override_0msat_htlc_minimum() {
7795         let mut zero_config = UserConfig::default();
7796         zero_config.channel_handshake_config.our_htlc_minimum_msat = 0;
7797         let chanmon_cfgs = create_chanmon_cfgs(2);
7798         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7799         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(zero_config.clone())]);
7800         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7801
7802         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, Some(zero_config)).unwrap();
7803         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7804         assert_eq!(res.htlc_minimum_msat, 1);
7805
7806         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
7807         let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
7808         assert_eq!(res.htlc_minimum_msat, 1);
7809 }
7810
7811 #[test]
7812 fn test_channel_update_has_correct_htlc_maximum_msat() {
7813         // Tests that the `ChannelUpdate` message has the correct values for `htlc_maximum_msat` set.
7814         // BOLT 7 specifies that, if present, `htlc_maximum_msat`:
7815         // 1. MUST be set to less than or equal to the channel capacity. In LDK, this is capped to
7816         // 90% of the `channel_value`.
7817         // 2. MUST be set to less than or equal to the `max_htlc_value_in_flight_msat` received from the peer.
7818
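	// For the 100_000 sat (100_000_000 msat) channels below, that means the advertised
	// htlc_maximum_msat is min(the peer's max_htlc_value_in_flight_msat, 90% of the channel value),
	// e.g. 30_000_000 msat against the 30% config and 90_000_000 msat against the 95%/100% configs.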
7819         let mut config_30_percent = UserConfig::default();
7820         config_30_percent.channel_handshake_config.announced_channel = true;
7821         config_30_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 30;
7822         let mut config_50_percent = UserConfig::default();
7823         config_50_percent.channel_handshake_config.announced_channel = true;
7824         config_50_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50;
7825         let mut config_95_percent = UserConfig::default();
7826         config_95_percent.channel_handshake_config.announced_channel = true;
7827         config_95_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 95;
7828         let mut config_100_percent = UserConfig::default();
7829         config_100_percent.channel_handshake_config.announced_channel = true;
7830         config_100_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
7831
7832         let chanmon_cfgs = create_chanmon_cfgs(4);
7833         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
7834         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[Some(config_30_percent), Some(config_50_percent), Some(config_95_percent), Some(config_100_percent)]);
7835         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
7836
7837         let channel_value_satoshis = 100000;
7838         let channel_value_msat = channel_value_satoshis * 1000;
7839         let channel_value_30_percent_msat = (channel_value_msat as f64 * 0.3) as u64;
7840         let channel_value_50_percent_msat = (channel_value_msat as f64 * 0.5) as u64;
7841         let channel_value_90_percent_msat = (channel_value_msat as f64 * 0.9) as u64;
7842
7843         let (node_0_chan_update, node_1_chan_update, _, _)  = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value_satoshis, 10001);
7844         let (node_2_chan_update, node_3_chan_update, _, _)  = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, channel_value_satoshis, 10001);
7845
7846         // Assert that `node[0]`'s `ChannelUpdate` is capped at 50 percent of the `channel_value`, as
7847         // that's the value of `node[1]`'s `holder_max_htlc_value_in_flight_msat`.
7848         assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, channel_value_50_percent_msat);
7849         // Assert that `node[1]`'s `ChannelUpdate` is capped at 30 percent of the `channel_value`, as
7850         // that's the value of `node[0]`'s `holder_max_htlc_value_in_flight_msat`.
7851         assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, channel_value_30_percent_msat);
7852
7853         // Assert that `node[2]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
7854         // the value of `node[3]`'s `holder_max_htlc_value_in_flight_msat` (100%), exceeds 90% of the
7855         // `channel_value`.
7856         assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
7857         // Assert that `node[3]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
7858         // the value of `node[2]`'s `holder_max_htlc_value_in_flight_msat` (95%), exceeds 90% of the
7859         // `channel_value`.
7860         assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
7861 }
7862
7863 #[test]
7864 fn test_manually_accept_inbound_channel_request() {
7865         let mut manually_accept_conf = UserConfig::default();
7866         manually_accept_conf.manually_accept_inbound_channels = true;
7867         let chanmon_cfgs = create_chanmon_cfgs(2);
7868         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7869         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
7870         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7871
7872         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
7873         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7874
7875         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
7876
7877         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
7878         // accepting the inbound channel request.
7879         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
7880
7881         let events = nodes[1].node.get_and_clear_pending_events();
7882         match events[0] {
7883                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
7884                         nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23).unwrap();
7885                 }
7886                 _ => panic!("Unexpected event"),
7887         }
7888
7889         let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
7890         assert_eq!(accept_msg_ev.len(), 1);
7891
7892         match accept_msg_ev[0] {
7893                 MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
7894                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
7895                 }
7896                 _ => panic!("Unexpected event"),
7897         }
7898
7899         nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
7900
7901         let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
7902         assert_eq!(close_msg_ev.len(), 1);
7903
7904         let events = nodes[1].node.get_and_clear_pending_events();
7905         match events[0] {
7906                 Event::ChannelClosed { user_channel_id, .. } => {
7907                         assert_eq!(user_channel_id, 23);
7908                 }
7909                 _ => panic!("Unexpected event"),
7910         }
7911 }
7912
7913 #[test]
7914 fn test_manually_reject_inbound_channel_request() {
7915         let mut manually_accept_conf = UserConfig::default();
7916         manually_accept_conf.manually_accept_inbound_channels = true;
7917         let chanmon_cfgs = create_chanmon_cfgs(2);
7918         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7919         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
7920         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7921
7922         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
7923         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7924
7925         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
7926
7927         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
7928         // rejecting the inbound channel request.
7929         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
7930
7931         let events = nodes[1].node.get_and_clear_pending_events();
7932         match events[0] {
7933                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
7934                         nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
7935                 }
7936                 _ => panic!("Unexpected event"),
7937         }
7938
7939         let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
7940         assert_eq!(close_msg_ev.len(), 1);
7941
7942         match close_msg_ev[0] {
7943                 MessageSendEvent::HandleError { ref node_id, .. } => {
7944                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
7945                 }
7946                 _ => panic!("Unexpected event"),
7947         }
7948
7949         // There should be no more events to process, as the channel was never opened.
7950         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
7951 }
7952
7953 #[test]
7954 fn test_can_not_accept_inbound_channel_twice() {
7955         let mut manually_accept_conf = UserConfig::default();
7956         manually_accept_conf.manually_accept_inbound_channels = true;
7957         let chanmon_cfgs = create_chanmon_cfgs(2);
7958         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7959         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
7960         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7961
7962         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
7963         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7964
7965         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
7966
7967         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
7968         // accepting the inbound channel request.
7969         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
7970
7971         let events = nodes[1].node.get_and_clear_pending_events();
7972         match events[0] {
7973                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
7974                         nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
7975                         let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0);
7976                         match api_res {
7977                                 Err(APIError::APIMisuseError { err }) => {
7978                                         assert_eq!(err, "No such channel awaiting to be accepted.");
7979                                 },
7980                                 Ok(_) => panic!("Channel shouldn't be possible to be accepted twice"),
7981                                 Err(e) => panic!("Unexpected Error {:?}", e),
7982                         }
7983                 }
7984                 _ => panic!("Unexpected event"),
7985         }
7986
7987         // Ensure that the channel wasn't closed after attempting to accept it twice.
7988         let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
7989         assert_eq!(accept_msg_ev.len(), 1);
7990
7991         match accept_msg_ev[0] {
7992                 MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
7993                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
7994                 }
7995                 _ => panic!("Unexpected event"),
7996         }
7997 }
7998
7999 #[test]
8000 fn test_can_not_accept_unknown_inbound_channel() {
8001         let chanmon_cfg = create_chanmon_cfgs(2);
8002         let node_cfg = create_node_cfgs(2, &chanmon_cfg);
8003         let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
8004         let nodes = create_network(2, &node_cfg, &node_chanmgr);
8005
8006         let unknown_channel_id = [0; 32];
8007         let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0);
8008         match api_res {
8009                 Err(APIError::APIMisuseError { err }) => {
8010                         assert_eq!(err, "No such channel awaiting to be accepted.");
8011                 },
8012                 Ok(_) => panic!("It shouldn't be possible to accept an unknown channel"),
8013                 Err(e) => panic!("Unexpected Error: {:?}", e),
8014         }
8015 }
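// For reference, the manual-acceptance flow exercised by the tests above is driven from the event
// queue. A minimal sketch (illustrative only, not compiled as a test; the `user_channel_id` of 0 is
// an arbitrary value of the caller's choosing):
//
//     if let Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } = event {
//         // Accept exactly once; a second call returns APIError::APIMisuseError.
//         node.accept_inbound_channel(&temporary_channel_id, &counterparty_node_id, 0).unwrap();
//     }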
8016
8017 #[test]
8018 fn test_onion_value_mpp_set_calculation() {
8019         // Test that we use the onion value `amt_to_forward` when
8020         // calculating whether we've reached the `total_msat` of an MPP
8021         // by having a routing node forward more than `amt_to_forward`
8022         // and checking that the receiving node doesn't generate
8023         // a PaymentClaimable event too early
8024         let node_count = 4;
8025         let chanmon_cfgs = create_chanmon_cfgs(node_count);
8026         let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
8027         let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
8028         let mut nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
8029
8030         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8031         let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
8032         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
8033         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
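        // These four channels form two disjoint two-hop paths from node 0 to node 3:
        // 0 -> 1 -> 3 (chan_1, chan_3) and 0 -> 2 -> 3 (chan_2, chan_4).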
8034
8035         let total_msat = 100_000;
8036         let expected_paths: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]];
8037         let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], total_msat);
8038         let sample_path = route.paths.pop().unwrap();
8039
8040         let mut path_1 = sample_path.clone();
8041         path_1.hops[0].pubkey = nodes[1].node.get_our_node_id();
8042         path_1.hops[0].short_channel_id = chan_1_id;
8043         path_1.hops[1].pubkey = nodes[3].node.get_our_node_id();
8044         path_1.hops[1].short_channel_id = chan_3_id;
8045         path_1.hops[1].fee_msat = 100_000;
8046         route.paths.push(path_1);
8047
8048         let mut path_2 = sample_path.clone();
8049         path_2.hops[0].pubkey = nodes[2].node.get_our_node_id();
8050         path_2.hops[0].short_channel_id = chan_2_id;
8051         path_2.hops[1].pubkey = nodes[3].node.get_our_node_id();
8052         path_2.hops[1].short_channel_id = chan_4_id;
8053         path_2.hops[1].fee_msat = 1_000;
8054         route.paths.push(path_2);
8055
8056         // Send payment
8057         let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
8058         let onion_session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
8059                 RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap();
8060         nodes[0].node.test_send_payment_internal(&route, our_payment_hash,
8061                 RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
8062         check_added_monitors!(nodes[0], expected_paths.len());
8063
8064         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8065         assert_eq!(events.len(), expected_paths.len());
8066
8067         // First path
8068         let ev = remove_first_msg_event_to_node(&expected_paths[0][0].node.get_our_node_id(), &mut events);
8069         let mut payment_event = SendEvent::from_event(ev);
8070         let mut prev_node = &nodes[0];
8071
8072         for (idx, &node) in expected_paths[0].iter().enumerate() {
8073                 assert_eq!(node.node.get_our_node_id(), payment_event.node_id);
8074
8075                 if idx == 0 { // routing node
8076                         let session_priv = [3; 32];
8077                         let height = nodes[0].best_block_info().1;
8078                         let session_priv = SecretKey::from_slice(&session_priv).unwrap();
8079                         let mut onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
8080                         let (mut onion_payloads, _, _) = onion_utils::build_onion_payloads(&route.paths[0], 100_000,
8081                                 RecipientOnionFields::secret_only(our_payment_secret), height + 1, &None).unwrap();
8082                         // Edit amt_to_forward to simulate the sender having set
8083                         // the final amount and the routing node taking less fee
8084                         if let msgs::OutboundOnionPayload::Receive { ref mut amt_msat, .. } = onion_payloads[1] {
8085                                 *amt_msat = 99_000;
8086                         } else { panic!() }
8087                         let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
8088                         payment_event.msgs[0].onion_routing_packet = new_onion_packet;
8089                 }
8090
8091                 node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]);
8092                 check_added_monitors!(node, 0);
8093                 commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
8094                 expect_pending_htlcs_forwardable!(node);
8095
8096                 if idx == 0 {
8097                         let mut events_2 = node.node.get_and_clear_pending_msg_events();
8098                         assert_eq!(events_2.len(), 1);
8099                         check_added_monitors!(node, 1);
8100                         payment_event = SendEvent::from_event(events_2.remove(0));
8101                         assert_eq!(payment_event.msgs.len(), 1);
8102                 } else {
8103                         let events_2 = node.node.get_and_clear_pending_events();
8104                         assert!(events_2.is_empty());
8105                 }
8106
8107                 prev_node = node;
8108         }
8109
8110         // Second path
8111         let ev = remove_first_msg_event_to_node(&expected_paths[1][0].node.get_our_node_id(), &mut events);
8112         pass_along_path(&nodes[0], expected_paths[1], 101_000, our_payment_hash.clone(), Some(our_payment_secret), ev, true, None);
8113
8114         claim_payment_along_route(&nodes[0], expected_paths, false, our_payment_preimage);
8115 }
8116
8117 fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) {
8118
8119         let routing_node_count = msat_amounts.len();
8120         let node_count = routing_node_count + 2;
8121
8122         let chanmon_cfgs = create_chanmon_cfgs(node_count);
8123         let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
8124         let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
8125         let nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
8126
8127         let src_idx = 0;
8128         let dst_idx = 1;
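        // Node 0 is the sender and node 1 the recipient; each routing node 2..(2 + routing_node_count)
        // carries one part of the payment, with the amount taken from `msat_amounts`.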
8129
8130         // Create channels for each amount
8131         let mut expected_paths = Vec::with_capacity(routing_node_count);
8132         let mut src_chan_ids = Vec::with_capacity(routing_node_count);
8133         let mut dst_chan_ids = Vec::with_capacity(routing_node_count);
8134         for i in 0..routing_node_count {
8135                 let routing_node = 2 + i;
8136                 let src_chan_id = create_announced_chan_between_nodes(&nodes, src_idx, routing_node).0.contents.short_channel_id;
8137                 src_chan_ids.push(src_chan_id);
8138                 let dst_chan_id = create_announced_chan_between_nodes(&nodes, routing_node, dst_idx).0.contents.short_channel_id;
8139                 dst_chan_ids.push(dst_chan_id);
8140                 let path = vec![&nodes[routing_node], &nodes[dst_idx]];
8141                 expected_paths.push(path);
8142         }
8143         let expected_paths: Vec<&[&Node]> = expected_paths.iter().map(|route| route.as_slice()).collect();
8144
8145         // Create a route for each amount
8146         let example_amount = 100000;
8147         let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[src_idx], nodes[dst_idx], example_amount);
8148         let sample_path = route.paths.pop().unwrap();
8149         for i in 0..routing_node_count {
8150                 let routing_node = 2 + i;
8151                 let mut path = sample_path.clone();
8152                 path.hops[0].pubkey = nodes[routing_node].node.get_our_node_id();
8153                 path.hops[0].short_channel_id = src_chan_ids[i];
8154                 path.hops[1].pubkey = nodes[dst_idx].node.get_our_node_id();
8155                 path.hops[1].short_channel_id = dst_chan_ids[i];
8156                 path.hops[1].fee_msat = msat_amounts[i];
8157                 route.paths.push(path);
8158         }
8159
8160         // Send payment with manually set total_msat
8161         let payment_id = PaymentId(nodes[src_idx].keys_manager.backing.get_secure_random_bytes());
8162         let onion_session_privs = nodes[src_idx].node.test_add_new_pending_payment(our_payment_hash,
8163                 RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap();
8164         nodes[src_idx].node.test_send_payment_internal(&route, our_payment_hash,
8165                 RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
8166         check_added_monitors!(nodes[src_idx], expected_paths.len());
8167
8168         let mut events = nodes[src_idx].node.get_and_clear_pending_msg_events();
8169         assert_eq!(events.len(), expected_paths.len());
8170         let mut amount_received = 0;
8171         for (path_idx, expected_path) in expected_paths.iter().enumerate() {
8172                 let ev = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &mut events);
8173
8174                 let current_path_amount = msat_amounts[path_idx];
8175                 amount_received += current_path_amount;
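                // The recipient should only consider the payment claimable once the amounts delivered
                // across all paths first reach `total_msat`; flag the path where that happens.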
8176                 let became_claimable_now = amount_received >= total_msat && amount_received - current_path_amount < total_msat;
8177                 pass_along_path(&nodes[src_idx], expected_path, amount_received, our_payment_hash.clone(), Some(our_payment_secret), ev, became_claimable_now, None);
8178         }
8179
8180         claim_payment_along_route(&nodes[src_idx], &expected_paths, false, our_payment_preimage);
8181 }
8182
8183 #[test]
8184 fn test_overshoot_mpp() {
8185         do_test_overshoot_mpp(&[100_000, 101_000], 200_000);
8186         do_test_overshoot_mpp(&[100_000, 10_000, 100_000], 200_000);
8187 }
8188
8189 #[test]
8190 fn test_simple_mpp() {
8191         // Simple test of sending a multi-path payment.
8192         let chanmon_cfgs = create_chanmon_cfgs(4);
8193         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
8194         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
8195         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
8196
8197         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8198         let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
8199         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
8200         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
8201
8202         let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
8203         let path = route.paths[0].clone();
8204         route.paths.push(path);
8205         route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
8206         route.paths[0].hops[0].short_channel_id = chan_1_id;
8207         route.paths[0].hops[1].short_channel_id = chan_3_id;
8208         route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
8209         route.paths[1].hops[0].short_channel_id = chan_2_id;
8210         route.paths[1].hops[1].short_channel_id = chan_4_id;
8211         send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
8212         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
8213 }
8214
8215 #[test]
8216 fn test_preimage_storage() {
8217         // Simple test of payment preimage storage allowing no client-side storage to claim payments
8218         let chanmon_cfgs = create_chanmon_cfgs(2);
8219         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8220         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8221         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8222
8223         create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8224
8225         {
8226                 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200, None).unwrap();
8227                 let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
8228                 nodes[0].node.send_payment_with_route(&route, payment_hash,
8229                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
8230                 check_added_monitors!(nodes[0], 1);
8231                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8232                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
8233                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8234                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8235         }
8236         // Note that after leaving the above scope we have no knowledge of any arguments or return
8237         // values from previous calls.
8238         expect_pending_htlcs_forwardable!(nodes[1]);
8239         let events = nodes[1].node.get_and_clear_pending_events();
8240         assert_eq!(events.len(), 1);
8241         match events[0] {
8242                 Event::PaymentClaimable { ref purpose, .. } => {
8243                         match &purpose {
8244                                 PaymentPurpose::InvoicePayment { payment_preimage, .. } => {
8245                                         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap());
8246                                 },
8247                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
8248                         }
8249                 },
8250                 _ => panic!("Unexpected event"),
8251         }
8252 }
8253
8254 #[test]
8255 fn test_bad_secret_hash() {
8256         // Simple test of unregistered payment hash/invalid payment secret handling
8257         let chanmon_cfgs = create_chanmon_cfgs(2);
8258         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8259         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8260         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8261
8262         create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8263
8264         let random_payment_hash = PaymentHash([42; 32]);
8265         let random_payment_secret = PaymentSecret([43; 32]);
8266         let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2, None).unwrap();
8267         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
8268
8269         // All the cases below should end up being handled exactly identically, so we handle the
8270         // resulting events with a macro.
8271         macro_rules! handle_unknown_invalid_payment_data {
8272                 ($payment_hash: expr) => {
8273                         check_added_monitors!(nodes[0], 1);
8274                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8275                         let payment_event = SendEvent::from_event(events.pop().unwrap());
8276                         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8277                         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8278
8279                         // We have to forward pending HTLCs once to process the receipt of the HTLC and then
8280                         // again to process the pending backwards-failure of the HTLC
8281                         expect_pending_htlcs_forwardable!(nodes[1]);
8282                         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment{ payment_hash: $payment_hash }]);
8283                         check_added_monitors!(nodes[1], 1);
8284
8285                         // We should fail the payment back
8286                         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
8287                         match events.pop().unwrap() {
8288                                 MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } } => {
8289                                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
8290                                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
8291                                 },
8292                                 _ => panic!("Unexpected event"),
8293                         }
8294                 }
8295         }
8296
8297         let expected_error_code = 0x4000|15; // incorrect_or_unknown_payment_details
8298         // Error data is the HTLC value (100,000) and current block height
8299         let expected_error_data = [0, 0, 0, 0, 0, 1, 0x86, 0xa0, 0, 0, 0, CHAN_CONFIRM_DEPTH as u8];
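        // (100_000 msat is 0x0001_86a0, giving the first eight bytes; the final four bytes are the
        // block height, encoded here as CHAN_CONFIRM_DEPTH.)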
8300
8301         // Send a payment with the right payment hash but the wrong payment secret
8302         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
8303                 RecipientOnionFields::secret_only(random_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
8304         handle_unknown_invalid_payment_data!(our_payment_hash);
8305         expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data);
8306
8307         // Send a payment with a random payment hash, but the right payment secret
8308         nodes[0].node.send_payment_with_route(&route, random_payment_hash,
8309                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(random_payment_hash.0)).unwrap();
8310         handle_unknown_invalid_payment_data!(random_payment_hash);
8311         expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8312
8313         // Send a payment with a random payment hash and random payment secret
8314         nodes[0].node.send_payment_with_route(&route, random_payment_hash,
8315                 RecipientOnionFields::secret_only(random_payment_secret), PaymentId(random_payment_hash.0)).unwrap();
8316         handle_unknown_invalid_payment_data!(random_payment_hash);
8317         expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8318 }
8319
8320 #[test]
8321 fn test_update_err_monitor_lockdown() {
8322         // Our monitor will lock updates of the local commitment transaction once a broadcast condition
8323         // has been fulfilled (either a force-close from Channel or a block height requiring an HTLC-
8324         // timeout). Trying to update the monitor after lockdown should return a ChannelMonitorUpdateStatus
8325         // error.
8326         //
8327         // This scenario may happen in a watchtower setup, where the watchtower processes a block height
8328         // triggering a timeout while a slow-block-processing ChannelManager receives the locally signed
8329         // commitment at the same time.
8330
8331         let chanmon_cfgs = create_chanmon_cfgs(2);
8332         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8333         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8334         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8335
8336         // Create some initial channel
8337         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
8338         let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
8339
8340         // Rebalance the network to generate HTLCs in both directions
8341         send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
8342
8343         // Route an HTLC from node 0 to node 1 (but don't settle)
8344         let (preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
8345
8346         // Copy the ChainMonitor to simulate a watchtower and advance node 0's block height until its ChannelMonitor times out the HTLC on-chain
8347         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8348         let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
8349         let persister = test_utils::TestPersister::new();
8350         let watchtower = {
8351                 let new_monitor = {
8352                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8353                         let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
8354                                         &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8355                         assert!(new_monitor == *monitor);
8356                         new_monitor
8357                 };
8358                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8359                 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
8360                 watchtower
8361         };
8362         let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
8363         // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
8364         // transaction lock time requirements here.
8365         chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (block.clone(), 200));
8366         watchtower.chain_monitor.block_connected(&block, 200);
8367
8368         // Try to update ChannelMonitor
8369         nodes[1].node.claim_funds(preimage);
8370         check_added_monitors!(nodes[1], 1);
8371         expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
8372
8373         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8374         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
8375         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
8376         {
8377                 let mut node_0_per_peer_lock;
8378                 let mut node_0_peer_state_lock;
8379                 let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
8380                 if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
8381                         assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
8382                         assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8383                 } else { assert!(false); }
8384         }
8385         // Our local monitor is in-sync and hasn't yet processed the timeout
8386         check_added_monitors!(nodes[0], 1);
8387         let events = nodes[0].node.get_and_clear_pending_events();
8388         assert_eq!(events.len(), 1);
8389 }
8390
8391 #[test]
8392 fn test_concurrent_monitor_claim() {
8393         // Watchtower Alice receives a block and broadcasts state N. The channel then advances to state
8394         // N+1, which is sent to both watchtowers: Bob accepts N+1 while Alice rejects it. Bob later
8395         // receives a block and broadcasts the latest state N+1, which confirms on-chain. Alice then
8396         // claims her output from state N+1.
8397
8398         let chanmon_cfgs = create_chanmon_cfgs(2);
8399         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8400         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8401         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8402
8403         // Create some initial channel
8404         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
8405         let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
8406
8407         // Rebalance the network to generate HTLCs in both directions
8408         send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
8409
8410         // Route an HTLC from node 0 to node 1 (but don't settle)
8411         route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
8412
8413         // Copy the ChainMonitor to simulate watchtower Alice and advance her block height until her ChannelMonitor times out the HTLC on-chain
8414         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8415         let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
8416         let persister = test_utils::TestPersister::new();
8417         let alice_broadcaster = test_utils::TestBroadcaster::with_blocks(
8418                 Arc::new(Mutex::new(nodes[0].blocks.lock().unwrap().clone())),
8419         );
8420         let watchtower_alice = {
8421                 let new_monitor = {
8422                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8423                         let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
8424                                         &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8425                         assert!(new_monitor == *monitor);
8426                         new_monitor
8427                 };
8428                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &alice_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8429                 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
8430                 watchtower
8431         };
8432         let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
8433         // Make Alice aware of enough blocks that it doesn't think we're violating transaction lock time
8434         // requirements here.
8435         const HTLC_TIMEOUT_BROADCAST: u32 = CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS;
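        // (the funding confirmation depth plus one, the HTLC's TEST_FINAL_CLTV expiry delta, and the
        // latency grace period: the height at which Alice's monitor is expected to force-broadcast below)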
8436         alice_broadcaster.blocks.lock().unwrap().resize((HTLC_TIMEOUT_BROADCAST) as usize, (block.clone(), HTLC_TIMEOUT_BROADCAST));
8437         watchtower_alice.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST);
8438
8439         // Watchtower Alice should have broadcast a commitment/HTLC-timeout
8440         let alice_state = {
8441                 let mut txn = alice_broadcaster.txn_broadcast();
8442                 assert_eq!(txn.len(), 2);
8443                 txn.remove(0)
8444         };
8445
8446         // Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first.
8447         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8448         let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
8449         let persister = test_utils::TestPersister::new();
8450         let bob_broadcaster = test_utils::TestBroadcaster::with_blocks(Arc::clone(&alice_broadcaster.blocks));
8451         let watchtower_bob = {
8452                 let new_monitor = {
8453                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8454                         let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
8455                                         &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8456                         assert!(new_monitor == *monitor);
8457                         new_monitor
8458                 };
8459                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &bob_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8460                 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
8461                 watchtower
8462         };
8463         watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST - 1);
8464
8465         // Route another payment to generate another update while the previous HTLC is still pending
8466         let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 3000000);
8467         nodes[1].node.send_payment_with_route(&route, payment_hash,
8468                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
8469         check_added_monitors!(nodes[1], 1);
8470
8471         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8472         assert_eq!(updates.update_add_htlcs.len(), 1);
8473         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]);
8474         {
8475                 let mut node_0_per_peer_lock;
8476                 let mut node_0_peer_state_lock;
8477                 let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
8478                 if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
8479                         // Watchtower Alice should already have seen the block and so should reject the update
8480                         assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
8481                         assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8482                         assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8483                 } else { assert!(false); }
8484         }
8485         // Our local monitor is in-sync and hasn't yet processed the timeout
8486         check_added_monitors!(nodes[0], 1);
8487
8488         // Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout
8489         watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST);
8490
8491         // Watchtower Bob should have broadcast a commitment/HTLC-timeout
8492         let bob_state_y;
8493         {
8494                 let mut txn = bob_broadcaster.txn_broadcast();
8495                 assert_eq!(txn.len(), 2);
8496                 bob_state_y = txn.remove(0);
8497         };
8498
8499         // We confirm Bob's state Y on Alice; she should broadcast an HTLC-timeout
8500         let height = HTLC_TIMEOUT_BROADCAST + 1;
8501         connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
8502         check_closed_broadcast(&nodes[0], 1, true);
8503         check_closed_event!(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false,
8504                 [nodes[1].node.get_our_node_id()], 100000);
8505         watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
8506         check_added_monitors(&nodes[0], 1);
8507         {
8508                 let htlc_txn = alice_broadcaster.txn_broadcast();
8509                 assert_eq!(htlc_txn.len(), 2);
8510                 check_spends!(htlc_txn[0], bob_state_y);
8511                 // Alice doesn't clean up the old HTLC claim since it hasn't seen a conflicting spend for
8512                 // it. However, she should, because it now has an invalid parent.
8513                 check_spends!(htlc_txn[1], alice_state);
8514         }
8515 }
8516
8517 #[test]
8518 fn test_pre_lockin_no_chan_closed_update() {
8519         // Test that if a peer closes a channel in response to a funding_created message we don't
8520         // generate a channel update (as the channel cannot appear on chain without a funding_signed
8521         // message).
8522         //
8523         // Doing so would imply a channel monitor update before the initial channel monitor
8524         // registration, violating our API guarantees.
8525         //
8526         // Previously, full_stack_target managed to hit this case by opening then closing a channel,
8527         // then opening a second channel with the same funding output as the first (which is not
8528         // rejected because the first channel does not exist in the ChannelManager) and closing it
8529         // before receiving funding_signed.
8530         let chanmon_cfgs = create_chanmon_cfgs(2);
8531         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8532         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8533         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8534
8535         // Create an initial channel
8536         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
8537         let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8538         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
8539         let accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
8540         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
8541
8542         // Move the first channel through the funding flow...
8543         let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
8544
8545         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
8546         check_added_monitors!(nodes[0], 0);
8547
8548         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
8549         let channel_id = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
8550         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
8551         assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
8552         check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
8553                 [nodes[1].node.get_our_node_id(); 2], 100000);
8554 }
8555
8556 #[test]
8557 fn test_htlc_no_detection() {
8558         // This test is a mutation to underscore the detection logic bug we had
8559         // before #653. The HTLC value routed is above the remaining balance, thus
8560         // inverting the HTLC and `to_remote` outputs. The HTLC will come second and
8561         // wouldn't have been seen by the pre-#653 detection, as we were enumerate()'ing
8562         // over a watched-outputs vector (Vec<TxOut>), implicitly relying on output
8563         // ordering for correct filtering of spending children.
8564
8565         let chanmon_cfgs = create_chanmon_cfgs(2);
8566         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8567         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8568         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8569
8570         // Create some initial channels
8571         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8572
8573         send_payment(&nodes[0], &vec!(&nodes[1])[..], 1_000_000);
8574         let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
8575         let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
8576         assert_eq!(local_txn[0].input.len(), 1);
8577         assert_eq!(local_txn[0].output.len(), 3);
8578         check_spends!(local_txn[0], chan_1.3);
8579
8580         // Time out the HTLC on A's chain so it can generate an HTLC-Timeout tx
8581         let block = create_dummy_block(nodes[0].best_block_hash(), 42, vec![local_txn[0].clone()]);
8582         connect_block(&nodes[0], &block);
8583         // We deliberately connect the local tx twice, as this would provoke a failure when running
8584         // this test before the #653 fix.
8585         chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &block, nodes[0].best_block_info().1 + 1);
8586         check_closed_broadcast!(nodes[0], true);
8587         check_added_monitors!(nodes[0], 1);
8588         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
8589         connect_blocks(&nodes[0], TEST_FINAL_CLTV);
8590
8591         let htlc_timeout = {
8592                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8593                 assert_eq!(node_txn.len(), 1);
8594                 assert_eq!(node_txn[0].input.len(), 1);
8595                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
8596                 check_spends!(node_txn[0], local_txn[0]);
8597                 node_txn[0].clone()
8598         };
8599
8600         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![htlc_timeout.clone()]));
8601         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
8602         expect_payment_failed!(nodes[0], our_payment_hash, false);
8603 }
8604
8605 fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain_before_fulfill: bool) {
8606         // If we route an HTLC, then learn the HTLC's preimage after the upstream channel has been
8607         // force-closed, we must claim that HTLC on-chain. (Given an HTLC forwarded from Alice --> Bob -->
8608         // Carol, Alice would be the upstream node, and Carol the downstream.)
8609         //
8610         // Steps of the test:
8611         // 1) Alice sends a HTLC to Carol through Bob.
8612         // 2) Carol doesn't settle the HTLC.
8613         // 3) If broadcast_alice is true, Alice force-closes her channel with Bob. Else Bob force closes.
8614         // Steps 4 and 5 may be reordered depending on go_onchain_before_fulfill.
8615         // 4) Bob sees Alice's commitment on his chain (or vice versa). An offered output is present
8616         //    but can't be claimed as Bob doesn't yet know the preimage.
8617         // 5) Carol releases the preimage to Bob off-chain.
8618         // 6) Bob claims the offered output on the broadcasted commitment.
8619         let chanmon_cfgs = create_chanmon_cfgs(3);
8620         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
8621         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
8622         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
8623
8624         // Create some initial channels
8625         let chan_ab = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8626         create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 10001);
8627
8628         // Steps (1) and (2):
8629         // Send an HTLC Alice --> Bob --> Carol, but Carol doesn't settle the HTLC back.
8630         let (payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
8631
8632         // Check that Alice's commitment transaction now contains an output for this HTLC.
8633         let alice_txn = get_local_commitment_txn!(nodes[0], chan_ab.2);
8634         check_spends!(alice_txn[0], chan_ab.3);
8635         assert_eq!(alice_txn[0].output.len(), 2);
8636         check_spends!(alice_txn[1], alice_txn[0]); // 2nd transaction is a non-final HTLC-timeout
8637         assert_eq!(alice_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
8638         assert_eq!(alice_txn.len(), 2);
8639
8640         // Steps (3) and (4):
8641         // If `go_onchain_before_fulfill`, broadcast the relevant commitment transaction and check that Bob
8642         // responds by (1) broadcasting a channel update and (2) adding a new ChannelMonitor.
8643         let mut force_closing_node = 0; // Alice force-closes
8644         let mut counterparty_node = 1; // Bob if Alice force-closes
8645
8646         // Bob force-closes
8647         if !broadcast_alice {
8648                 force_closing_node = 1;
8649                 counterparty_node = 0;
8650         }
8651         nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
8652         check_closed_broadcast!(nodes[force_closing_node], true);
8653         check_added_monitors!(nodes[force_closing_node], 1);
8654         check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed, [nodes[counterparty_node].node.get_our_node_id()], 100000);
8655         if go_onchain_before_fulfill {
8656                 let txn_to_broadcast = match broadcast_alice {
8657                         true => alice_txn.clone(),
8658                         false => get_local_commitment_txn!(nodes[1], chan_ab.2)
8659                 };
8660                 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
8661                 if broadcast_alice {
8662                         check_closed_broadcast!(nodes[1], true);
8663                         check_added_monitors!(nodes[1], 1);
8664                         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
8665                 }
8666         }
8667
8668         // Step (5):
8669         // Carol then claims the funds and sends an update_fulfill message to Bob, and they go through the
8670         // process of removing the HTLC from their commitment transactions.
8671         nodes[2].node.claim_funds(payment_preimage);
8672         check_added_monitors!(nodes[2], 1);
8673         expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
8674
8675         let carol_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
8676         assert!(carol_updates.update_add_htlcs.is_empty());
8677         assert!(carol_updates.update_fail_htlcs.is_empty());
8678         assert!(carol_updates.update_fail_malformed_htlcs.is_empty());
8679         assert!(carol_updates.update_fee.is_none());
8680         assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
8681
8682         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
8683         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if go_onchain_before_fulfill || force_closing_node == 1 { None } else { Some(1000) }, false, false);
8684         // If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
8685         if !go_onchain_before_fulfill && broadcast_alice {
8686                 let events = nodes[1].node.get_and_clear_pending_msg_events();
8687                 assert_eq!(events.len(), 1);
8688                 match events[0] {
8689                         MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
8690                                 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8691                         },
8692                         _ => panic!("Unexpected event"),
8693                 };
8694         }
8695         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &carol_updates.commitment_signed);
8696         // One monitor update for the preimage to update the Bob<->Alice channel, and one monitor update
8697         // for Carol<->Bob's updated commitment transaction info.
8698         check_added_monitors!(nodes[1], 2);
8699
8700         let events = nodes[1].node.get_and_clear_pending_msg_events();
8701         assert_eq!(events.len(), 2);
8702         let bob_revocation = match events[0] {
8703                 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
8704                         assert_eq!(*node_id, nodes[2].node.get_our_node_id());
8705                         (*msg).clone()
8706                 },
8707                 _ => panic!("Unexpected event"),
8708         };
8709         let bob_updates = match events[1] {
8710                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
8711                         assert_eq!(*node_id, nodes[2].node.get_our_node_id());
8712                         (*updates).clone()
8713                 },
8714                 _ => panic!("Unexpected event"),
8715         };
8716
8717         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revocation);
8718         check_added_monitors!(nodes[2], 1);
8719         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_updates.commitment_signed);
8720         check_added_monitors!(nodes[2], 1);
8721
8722         let events = nodes[2].node.get_and_clear_pending_msg_events();
8723         assert_eq!(events.len(), 1);
8724         let carol_revocation = match events[0] {
8725                 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
8726                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
8727                         (*msg).clone()
8728                 },
8729                 _ => panic!("Unexpected event"),
8730         };
8731         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &carol_revocation);
8732         check_added_monitors!(nodes[1], 1);
8733
8734         // If this test requires the force-closed channel to not be on-chain until after the fulfill,
8735         // here's where we put said channel's commitment tx on-chain.
8736         let mut txn_to_broadcast = alice_txn.clone();
8737         if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); }
8738         if !go_onchain_before_fulfill {
8739                 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
8740                 // If Bob was the one to force-close, he will have already passed these checks earlier.
8741                 if broadcast_alice {
8742                         check_closed_broadcast!(nodes[1], true);
8743                         check_added_monitors!(nodes[1], 1);
8744                         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
8745                 }
8746                 let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
8747                 if broadcast_alice {
8748                         assert_eq!(bob_txn.len(), 1);
8749                         check_spends!(bob_txn[0], txn_to_broadcast[0]);
8750                 } else {
8751                         assert_eq!(bob_txn.len(), 2);
8752                         check_spends!(bob_txn[0], chan_ab.3);
8753                 }
8754         }
8755
8756         // Step (6):
8757         // Finally, check that Bob broadcasted a preimage-claiming transaction for the HTLC output on the
8758         // broadcasted commitment transaction.
8759         {
8760                 let script_weight = match broadcast_alice {
8761                         true => OFFERED_HTLC_SCRIPT_WEIGHT,
8762                         false => ACCEPTED_HTLC_SCRIPT_WEIGHT
8763                 };
8764                 // If Alice force-closed, Bob only broadcasts a HTLC-output-claiming transaction. Otherwise,
8765                 // Bob force-closed and broadcasts the commitment transaction along with a
8766                 // HTLC-output-claiming transaction.
8767                 let bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
8768                 if broadcast_alice {
8769                         assert_eq!(bob_txn.len(), 1);
8770                         check_spends!(bob_txn[0], txn_to_broadcast[0]);
8771                         assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
8772                 } else {
8773                         assert_eq!(bob_txn.len(), 2);
8774                         check_spends!(bob_txn[1], txn_to_broadcast[0]);
8775                         assert_eq!(bob_txn[1].input[0].witness.last().unwrap().len(), script_weight);
8776                 }
8777         }
8778 }
8779
8780 #[test]
8781 fn test_onchain_htlc_settlement_after_close() {
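        // Arguments are (broadcast_alice, go_onchain_before_fulfill).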
8782         do_test_onchain_htlc_settlement_after_close(true, true);
8783         do_test_onchain_htlc_settlement_after_close(false, true); // Technically redundant, but may as well
8784         do_test_onchain_htlc_settlement_after_close(true, false);
8785         do_test_onchain_htlc_settlement_after_close(false, false);
8786 }
8787
8788 #[test]
8789 fn test_duplicate_temporary_channel_id_from_different_peers() {
8790         // Tests that we can accept two different `OpenChannel` requests with the same
8791         // `temporary_channel_id`, as long as they are from different peers.
8792         let chanmon_cfgs = create_chanmon_cfgs(3);
8793         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
8794         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
8795         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
8796
8797         // Create a first channel
8798         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
8799         let mut open_chan_msg_chan_1_0 = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
8800
8801         // Create a second channel
8802         nodes[2].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
8803         let mut open_chan_msg_chan_2_0 = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
8804
8805         // Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same
8806         // `temporary_channel_id` as the `OpenChannel` from nodes[1] to nodes[0].
8807         open_chan_msg_chan_2_0.temporary_channel_id = open_chan_msg_chan_1_0.temporary_channel_id;
8808
8809         // Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same
8810         // `temporary_channel_id` as they are from different peers.
8811         nodes[0].node.handle_open_channel(&nodes[1].node.get_our_node_id(), &open_chan_msg_chan_1_0);
8812         {
8813                 let events = nodes[0].node.get_and_clear_pending_msg_events();
8814                 assert_eq!(events.len(), 1);
8815                 match &events[0] {
8816                         MessageSendEvent::SendAcceptChannel { node_id, msg } => {
8817                                 assert_eq!(node_id, &nodes[1].node.get_our_node_id());
8818                                 assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id);
8819                         },
8820                         _ => panic!("Unexpected event"),
8821                 }
8822         }
8823
8824         nodes[0].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg_chan_2_0);
8825         {
8826                 let events = nodes[0].node.get_and_clear_pending_msg_events();
8827                 assert_eq!(events.len(), 1);
8828                 match &events[0] {
8829                         MessageSendEvent::SendAcceptChannel { node_id, msg } => {
8830                                 assert_eq!(node_id, &nodes[2].node.get_our_node_id());
8831                                 assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id);
8832                         },
8833                         _ => panic!("Unexpected event"),
8834                 }
8835         }
8836 }
8837
8838 #[test]
8839 fn test_duplicate_chan_id() {
8840         // Test that if a given peer tries to open a channel with the same channel_id as one that is
8841         // already open we reject it and keep the old channel.
8842         //
8843         // Previously, full_stack_target managed to figure out that if you tried to open two channels
8844         // with the same funding output (ie post-funding channel_id), we'd create a monitor update for
8845         // the existing channel when we detect the duplicate new channel, screwing up our monitor
8846         // updating logic for the existing channel.
8847         let chanmon_cfgs = create_chanmon_cfgs(2);
8848         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8849         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8850         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8851
8852         // Create an initial channel
8853         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
8854         let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8855         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
8856         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
8857
8858         // Try to create a second channel with the same temporary_channel_id as the first and check
8859         // that it is rejected.
8860         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
8861         {
8862                 let events = nodes[1].node.get_and_clear_pending_msg_events();
8863                 assert_eq!(events.len(), 1);
8864                 match events[0] {
8865                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
8866                                 // Technically, at this point, nodes[1] would be justified in thinking both the
8867                                 // first (valid) and second (invalid) channels are closed, given they both have
8868                                 // the same non-temporary channel_id. However, currently we do not, so we just
8869                                 // move forward with it.
8870                                 assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
8871                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
8872                         },
8873                         _ => panic!("Unexpected event"),
8874                 }
8875         }
8876
8877         // Move the first channel through the funding flow...
8878         let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
8879
8880         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
8881         check_added_monitors!(nodes[0], 0);
8882
8883         let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
8884         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
8885         {
8886                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
8887                 assert_eq!(added_monitors.len(), 1);
8888                 assert_eq!(added_monitors[0].0, funding_output);
8889                 added_monitors.clear();
8890         }
8891         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
8892
8893         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
8894
8895         let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
8896         let channel_id = funding_outpoint.to_channel_id();
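        // (The funding-based channel_id is derived by XORing the funding output index into the low
        // two bytes of the funding txid, as `OutPoint::to_channel_id` does per BOLT 2.)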
8897
8898         // Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a
8899         // temporary one).
8900
8901         // First try to open a second channel with a temporary channel id equal to the txid-based one.
8902         // Technically this is allowed by the spec, but we don't support it and there's little reason
8903         // to. Still, it shouldn't cause any other issues.
8904         open_chan_msg.temporary_channel_id = channel_id;
8905         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
8906         {
8907                 let events = nodes[1].node.get_and_clear_pending_msg_events();
8908                 assert_eq!(events.len(), 1);
8909                 match events[0] {
8910                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
8911                                 // Technically, at this point, nodes[1] would be justified in thinking both
8912                                 // channels are closed, but currently we do not, so we just move forward with it.
8913                                 assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
8914                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
8915                         },
8916                         _ => panic!("Unexpected event"),
8917                 }
8918         }
8919
8920         // Now try to create a second channel which has a duplicate funding output.
8921         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
8922         let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8923         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_2_msg);
8924         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
8925         create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event
8926
8927         let (_, funding_created) = {
8928                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
8929                 let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
8930                 // Once we call `get_funding_created` the channel has the same channel_id as
8931                 // another channel in the ChannelManager - an invalid state which would cause a panic later
8932                 // when we try to create another channel. Instead, we drop the channel entirely here (leaving
8933                 // the ChannelManager in a possibly nonsensical state instead).
8934                 let mut as_chan = a_peer_state.outbound_v1_channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
8935                 let logger = test_utils::TestLogger::new();
8936                 as_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
8937         };
8938         check_added_monitors!(nodes[0], 0);
8939         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
8940         // At this point we'll look up if the channel_id is present and immediately fail the channel
8941         // without trying to persist the `ChannelMonitor`.
8942         check_added_monitors!(nodes[1], 0);
8943
8944         // ...still, nodes[1] will reject the duplicate channel.
8945         {
8946                 let events = nodes[1].node.get_and_clear_pending_msg_events();
8947                 assert_eq!(events.len(), 1);
8948                 match events[0] {
8949                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
8950                                 // Technically, at this point, nodes[1] would be justified in thinking both
8951                                 // channels are closed, but currently we do not, so we just move forward with it.
8952                                 assert_eq!(msg.channel_id, channel_id);
8953                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
8954                         },
8955                         _ => panic!("Unexpected event"),
8956                 }
8957         }
8958
8959         // finally, finish creating the original channel and send a payment over it to make sure
8960         // everything is functional.
8961         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
8962         {
8963                 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
8964                 assert_eq!(added_monitors.len(), 1);
8965                 assert_eq!(added_monitors[0].0, funding_output);
8966                 added_monitors.clear();
8967         }
8968         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
8969
8970         let events_4 = nodes[0].node.get_and_clear_pending_events();
8971         assert_eq!(events_4.len(), 0);
8972         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
8973         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
8974
8975         let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
8976         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
8977         update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
8978
8979         send_payment(&nodes[0], &[&nodes[1]], 8000000);
8980 }
8981
8982 #[test]
8983 fn test_error_chans_closed() {
8984         // Test that we properly handle error messages, closing appropriate channels.
8985         //
8986         // Prior to #787 we'd allow a peer to make us force-close a channel we had with a different
8987         // peer. The "real" fix for that is to index channels with peers_ids, however in the mean time
8988         // we can test various edge cases around it to ensure we don't regress.
8989         let chanmon_cfgs = create_chanmon_cfgs(3);
8990         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
8991         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
8992         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
8993
8994         // Create some initial channels
8995         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8996         let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8997         let chan_3 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001);
8998
8999         assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
9000         assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
9001         assert_eq!(nodes[2].node.list_usable_channels().len(), 1);
9002
9003         // Closing a channel from a different peer has no effect
9004         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() });
9005         assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
9006
9007         // Closing one channel doesn't impact others
9008         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
9009         check_added_monitors!(nodes[0], 1);
9010         check_closed_broadcast!(nodes[0], false);
9011         check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
9012                 [nodes[1].node.get_our_node_id()], 100000);
9013         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
9014         assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
9015         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
9016         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2);
9017
9018         // A null channel ID (all zeroes, which per BOLT 1 refers to all channels with the peer) should close all channels
9019         let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9020         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
9021         check_added_monitors!(nodes[0], 2);
9022         check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
9023                 [nodes[1].node.get_our_node_id(); 2], 100000);
9024         let events = nodes[0].node.get_and_clear_pending_msg_events();
9025         assert_eq!(events.len(), 2);
9026         match events[0] {
9027                 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
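                        // The broadcast channel_update should have the disable bit (bit 1 of the flags) set,
                        // since the channel was just closed.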
9028                         assert_eq!(msg.contents.flags & 2, 2);
9029                 },
9030                 _ => panic!("Unexpected event"),
9031         }
9032         match events[1] {
9033                 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
9034                         assert_eq!(msg.contents.flags & 2, 2);
9035                 },
9036                 _ => panic!("Unexpected event"),
9037         }
9038         // Note that at this point users of a standard PeerHandler will end up calling
9039         // peer_disconnected.
9040         assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
9041         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
9042
9043         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
9044         assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
9045         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
9046 }
9047
9048 #[test]
9049 fn test_invalid_funding_tx() {
9050         // Test that we properly handle invalid funding transactions sent to us from a peer.
9051         //
9052         // Previously, all other major lightning implementations had failed to properly sanitize
9053         // funding transactions from their counterparties, leading to a multi-implementation critical
9054         // security vulnerability (though we always sanitized properly, we've previously had
9055         // un-released crashes in the sanitization process).
9056         //
9057         // Further, if the funding transaction is consensus-valid, confirms, and is later spent, we'd
9058         // previously have crashed in `ChannelMonitor` even though we closed the channel as bogus and
9059         // gave up on it. We test this here by generating such a transaction.
9060         let chanmon_cfgs = create_chanmon_cfgs(2);
9061         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9062         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9063         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9064
9065         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 10_000, 42, None).unwrap();
9066         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
9067         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9068
9069         let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
9070
9071         // Create a witness program which can be spent by a 4-empty-stack-elements witness and which is
9072         // 136 bytes long. This matches our "accepted HTLC preimage spend" matching, previously causing
9073         // a panic as we'd try to extract a 32 byte preimage from a witness element without checking
9074         // its length.
9075         let wit_program: Vec<u8> = channelmonitor::deliberately_bogus_accepted_htlc_witness_program();
9076         let wit_program_script: Script = wit_program.into();
9077         for output in tx.output.iter_mut() {
9078                 // Make the confirmed funding transaction have a bogus script_pubkey
9079                 output.script_pubkey = Script::new_v0_p2wsh(&wit_program_script.wscript_hash());
9080         }
9081
9082         nodes[0].node.funding_transaction_generated_unchecked(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone(), 0).unwrap();
9083         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
9084         check_added_monitors!(nodes[1], 1);
9085         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9086
9087         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
9088         check_added_monitors!(nodes[0], 1);
9089         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9090
9091         let events_1 = nodes[0].node.get_and_clear_pending_events();
9092         assert_eq!(events_1.len(), 0);
9093
9094         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
9095         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
9096         nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
9097
9098         let expected_err = "funding tx had wrong script/value or output index";
9099         confirm_transaction_at(&nodes[1], &tx, 1);
9100         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() },
9101                 [nodes[0].node.get_our_node_id()], 100000);
9102         check_added_monitors!(nodes[1], 1);
9103         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
9104         assert_eq!(events_2.len(), 1);
9105         if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] {
9106                 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
9107                 if let msgs::ErrorAction::SendErrorMessage { msg } = action {
9108                         assert_eq!(msg.data, "Channel closed because of an exception: ".to_owned() + expected_err);
9109                 } else { panic!(); }
9110         } else { panic!(); }
9111         assert_eq!(nodes[1].node.list_channels().len(), 0);
9112
9113         // Now confirm a spend of the (bogus) funding transaction. As long as the witness is 5 elements
9114         // long, the ChannelMonitor will try to read 32 bytes from the second-to-last element, panicking
9115         // as it's not 32 bytes long.
9116         let mut spend_tx = Transaction {
9117                 version: 2i32, lock_time: PackedLockTime::ZERO,
9118                 input: tx.output.iter().enumerate().map(|(idx, _)| TxIn {
9119                         previous_output: BitcoinOutPoint {
9120                                 txid: tx.txid(),
9121                                 vout: idx as u32,
9122                         },
9123                         script_sig: Script::new(),
9124                         sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
9125                         witness: Witness::from_vec(channelmonitor::deliberately_bogus_accepted_htlc_witness())
9126                 }).collect(),
9127                 output: vec![TxOut {
9128                         value: 1000,
9129                         script_pubkey: Script::new(),
9130                 }]
9131         };
9132         check_spends!(spend_tx, tx);
9133         mine_transaction(&nodes[1], &spend_tx);
9134 }
9135
9136 fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock: bool) {
9137         // In the first version of the chain::Confirm interface, after a refactor was made to not
9138         // broadcast CSV-locked transactions until their CSV lock is up, we wouldn't reliably broadcast
9139         // transactions after a `transactions_confirmed` call. Specifically, if the chain, provided via
9140         // `best_block_updated` is at height N, and a transaction output which we wish to spend at
9141         // height N-1 (due to a CSV to height N-1) is provided at height N, we will not broadcast the
9142         // spending transaction until height N+1 (or greater). This was due to the way
9143         // `ChannelMonitor::transactions_confirmed` worked, only checking if we should broadcast a
9144         // spending transaction at the height the input transaction was confirmed at, not whether we
9145         // should broadcast a spending transaction at the current height.
9146         // A second, similar, issue involved failing HTLCs backwards - because we only provided the
9147         // height at which transactions were confirmed to `OnchainTx::update_claims_view`, it wasn't
9148         // aware that the anti-reorg-delay had, in fact, already expired, waiting to fail-backwards
9149         // until we learned about an additional block.
9150         //
9151         // As an additional check, if `test_height_before_timelock` is set, we instead test that we
9152         // aren't broadcasting transactions too early (ie not broadcasting them at all).
9153         let chanmon_cfgs = create_chanmon_cfgs(3);
9154         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9155         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9156         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9157         *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
9158
9159         create_announced_chan_between_nodes(&nodes, 0, 1);
9160         let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
9161         let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
9162         nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
9163         nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
9164
9165         nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
9166         check_closed_broadcast!(nodes[1], true);
9167         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
9168         check_added_monitors!(nodes[1], 1);
9169         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
9170         assert_eq!(node_txn.len(), 1);
9171
9172         let conf_height = nodes[1].best_block_info().1;
9173         if !test_height_before_timelock {
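                // Connect 24 * 6 blocks up front so that, by the time we confirm the commitment
                // transaction below, the timelocks on its outputs (e.g. the to_self CSV delay) have
                // already expired.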
9174                 connect_blocks(&nodes[1], 24 * 6);
9175         }
9176         nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
9177                 &nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height);
9178         if test_height_before_timelock {
9179                 // If we confirmed the close transaction, but timelocks have not yet expired, we should not
9180                 // generate any events or broadcast any transactions
9181                 assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
9182                 assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
9183         } else {
9184                 // We should broadcast an HTLC transaction spending our funding transaction first
9185                 let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
9186                 assert_eq!(spending_txn.len(), 2);
9187                 assert_eq!(spending_txn[0].txid(), node_txn[0].txid());
9188                 check_spends!(spending_txn[1], node_txn[0]);
9189                 // We should also generate a SpendableOutputs event with the to_self output (as its
9190                 // timelock is up).
9191                 let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
9192                 assert_eq!(descriptor_spend_txn.len(), 1);
9193
9194                 // If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we
9195                 // should immediately fail-backwards the HTLC to the previous hop, without waiting for an
9196                 // additional block built on top of the current chain.
9197                 nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
9198                         &nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
9199                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
9200                 check_added_monitors!(nodes[1], 1);
9201
9202                 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9203                 assert!(updates.update_add_htlcs.is_empty());
9204                 assert!(updates.update_fulfill_htlcs.is_empty());
9205                 assert_eq!(updates.update_fail_htlcs.len(), 1);
9206                 assert!(updates.update_fail_malformed_htlcs.is_empty());
9207                 assert!(updates.update_fee.is_none());
9208                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
9209                 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
9210                 expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_announce.contents.short_channel_id, true);
9211         }
9212 }
9213
9214 #[test]
9215 fn test_tx_confirmed_skipping_blocks_immediate_broadcast() {
9216         do_test_tx_confirmed_skipping_blocks_immediate_broadcast(false);
9217         do_test_tx_confirmed_skipping_blocks_immediate_broadcast(true);
9218 }
9219
9220 fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
9221         let chanmon_cfgs = create_chanmon_cfgs(2);
9222         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9223         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9224         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9225
9226         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9227
9228         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
9229                 .with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
9230         let route = get_route!(nodes[0], payment_params, 10_000).unwrap();
9231
9232         let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]);
9233
9234         {
9235                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
9236                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
9237                 check_added_monitors!(nodes[0], 1);
9238                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9239                 assert_eq!(events.len(), 1);
9240                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
9241                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9242                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
9243         }
9244         expect_pending_htlcs_forwardable!(nodes[1]);
9245         expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 10_000);
9246
9247         {
9248                 // Note that we use a different PaymentId here to allow us to duplicatively pay
9249                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
9250                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_secret.0)).unwrap();
9251                 check_added_monitors!(nodes[0], 1);
9252                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9253                 assert_eq!(events.len(), 1);
9254                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
9255                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9256                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
9257                 // At this point, nodes[1] would notice it has too much value for the payment. It will
9258                 // assume the second is a privacy attack (no longer particularly relevant
9259                 // post-payment_secrets) and fail back the new HTLC. Previously, it'd also have failed back
9260                 // the first HTLC delivered above.
9261         }
9262
9263         expect_pending_htlcs_forwardable_ignore!(nodes[1]);
9264         nodes[1].node.process_pending_htlc_forwards();
9265
9266         if test_for_second_fail_panic {
9267                 // Now we go fail back the first HTLC from the user end.
9268                 nodes[1].node.fail_htlc_backwards(&our_payment_hash);
9269
9270                 let expected_destinations = vec![
9271                         HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
9272                         HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
9273                 ];
9274                 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations);
9275                 nodes[1].node.process_pending_htlc_forwards();
9276
9277                 check_added_monitors!(nodes[1], 1);
9278                 let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9279                 assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2);
9280
9281                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9282                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]);
9283                 commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
9284
9285                 let failure_events = nodes[0].node.get_and_clear_pending_events();
9286                 assert_eq!(failure_events.len(), 4);
9287                 if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); }
9288                 if let Event::PaymentFailed { .. } = failure_events[1] {} else { panic!(); }
9289                 if let Event::PaymentPathFailed { .. } = failure_events[2] {} else { panic!(); }
9290                 if let Event::PaymentFailed { .. } = failure_events[3] {} else { panic!(); }
9291         } else {
9292                 // Let the second HTLC fail and claim the first
9293                 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
9294                 nodes[1].node.process_pending_htlc_forwards();
9295
9296                 check_added_monitors!(nodes[1], 1);
9297                 let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9298                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9299                 commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
9300
9301                 expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new());
9302
9303                 claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
9304         }
9305 }
9306
9307 #[test]
9308 fn test_dup_htlc_second_fail_panic() {
9309         // Previously, if we received two HTLCs back-to-back, where the second overran the expected
9310         // value for the payment, we'd fail back both HTLCs after generating a `PaymentClaimable` event.
9311         // Then, if the user failed the second payment, they'd hit a "tried to fail an already failed
9312         // HTLC" debug panic. This tests for this behavior, checking that only one HTLC is auto-failed.
9313         do_test_dup_htlc_second_rejected(true);
9314 }
9315
9316 #[test]
9317 fn test_dup_htlc_second_rejected() {
9318         // Test that if we receive a second HTLC for an MPP payment that overruns the payment amount we
9319         // simply reject the second HTLC but are still able to claim the first HTLC.
9320         do_test_dup_htlc_second_rejected(false);
9321 }
9322
9323 #[test]
9324 fn test_inconsistent_mpp_params() {
9325         // Test that if we receive two HTLCs with different payment parameters we fail back the first
9326         // such HTLC and allow the second to stay.
9327         let chanmon_cfgs = create_chanmon_cfgs(4);
9328         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9329         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9330         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9331
9332         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
9333         create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
9334         create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
9335         let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);
9336
9337         let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
9338                 .with_bolt11_features(nodes[3].node.invoice_features()).unwrap();
9339         let mut route = get_route!(nodes[0], payment_params, 15_000_000).unwrap();
9340         assert_eq!(route.paths.len(), 2);
9341         route.paths.sort_by(|path_a, _| {
9342                 // Sort the paths so that the path through nodes[1] comes first
9343                 if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
9344                         core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
9345         });
9346
9347         let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]);
9348
9349         let cur_height = nodes[0].best_block_info().1;
9350         let payment_id = PaymentId([42; 32]);
9351
9352         let session_privs = {
9353                 // We create a fake route here so that we start with three pending HTLCs, which we'll
9354                 // ultimately have, just not right away.
9355                 let mut dup_route = route.clone();
9356                 dup_route.paths.push(route.paths[1].clone());
9357                 nodes[0].node.test_add_new_pending_payment(our_payment_hash,
9358                         RecipientOnionFields::secret_only(our_payment_secret), payment_id, &dup_route).unwrap()
9359         };
9360         nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash,
9361                 RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id,
9362                 &None, session_privs[0]).unwrap();
9363         check_added_monitors!(nodes[0], 1);
9364
9365         {
9366                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9367                 assert_eq!(events.len(), 1);
9368                 pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), false, None);
9369         }
9370         assert!(nodes[3].node.get_and_clear_pending_events().is_empty());
9371
9372         nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash,
9373                 RecipientOnionFields::secret_only(our_payment_secret), 14_000_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
9374         check_added_monitors!(nodes[0], 1);
9375
9376         {
9377                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9378                 assert_eq!(events.len(), 1);
9379                 let payment_event = SendEvent::from_event(events.pop().unwrap());
9380
9381                 nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9382                 commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false);
9383
9384                 expect_pending_htlcs_forwardable!(nodes[2]);
9385                 check_added_monitors!(nodes[2], 1);
9386
9387                 let mut events = nodes[2].node.get_and_clear_pending_msg_events();
9388                 assert_eq!(events.len(), 1);
9389                 let payment_event = SendEvent::from_event(events.pop().unwrap());
9390
9391                 nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
9392                 check_added_monitors!(nodes[3], 0);
9393                 commitment_signed_dance!(nodes[3], nodes[2], payment_event.commitment_msg, true, true);
9394
9395                 // At this point, nodes[3] should notice the two HTLCs don't contain the same total payment
9396                 // amount. It will assume the second is a privacy attack (no longer particularly relevant
9397                 // post-payment_secrets) and fail back the new HTLC.
9398         }
9399         expect_pending_htlcs_forwardable_ignore!(nodes[3]);
9400         nodes[3].node.process_pending_htlc_forwards();
9401         expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
9402         nodes[3].node.process_pending_htlc_forwards();
9403
9404         check_added_monitors!(nodes[3], 1);
9405
9406         let fail_updates_1 = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
9407         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9408         commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
9409
9410         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]);
9411         check_added_monitors!(nodes[2], 1);
9412
9413         let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
9414         nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_updates_2.update_fail_htlcs[0]);
9415         commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false);
9416
9417         expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
9418
9419         nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash,
9420                 RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id,
9421                 &None, session_privs[2]).unwrap();
9422         check_added_monitors!(nodes[0], 1);
9423
9424         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9425         assert_eq!(events.len(), 1);
9426         pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);
9427
9428         do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage);
9429         expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true, true);
9430 }
9431
9432 #[test]
9433 fn test_double_partial_claim() {
9434         // Test what happens if a node receives a payment, generates a PaymentClaimable event, the HTLCs
9435         // time out, the sender resends only some of the MPP parts, then the user processes the
9436         // PaymentClaimable event, ensuring they don't inadvertently claim only part of the full payment
9437         // amount.
9438         let chanmon_cfgs = create_chanmon_cfgs(4);
9439         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9440         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9441         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9442
9443         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
9444         create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
9445         create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
9446         create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);
9447
9448         let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000);
9449         assert_eq!(route.paths.len(), 2);
9450         route.paths.sort_by(|path_a, _| {
9451                 // Sort the paths so that the path through nodes[1] comes first
9452                 if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
9453                         core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
9454         });
9455
9456         send_along_route_with_secret(&nodes[0], route.clone(), &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 15_000_000, payment_hash, payment_secret);
9457         // nodes[3] has now received a PaymentClaimable event...which it will take some (exorbitant)
9458         // amount of time to respond to.
9459
9460         // Connect some blocks to time out the payment
9461         connect_blocks(&nodes[3], TEST_FINAL_CLTV);
9462         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later
9463
9464         let failed_destinations = vec![
9465                 HTLCDestination::FailedPayment { payment_hash },
9466                 HTLCDestination::FailedPayment { payment_hash },
9467         ];
9468         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations);
9469
9470         pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash, PaymentFailureReason::RecipientRejected);
9471
9472         // nodes[0] now retries one of the two paths...
9473         nodes[0].node.send_payment_with_route(&route, payment_hash,
9474                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9475         check_added_monitors!(nodes[0], 2);
9476
9477         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9478         assert_eq!(events.len(), 2);
9479         let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
9480         pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, false, None);
9481
9482         // At this point nodes[3] has received one half of the payment, and the user goes to handle
9483         // that PaymentClaimable event they got hours ago and never handled...we should refuse to claim.
9484         nodes[3].node.claim_funds(payment_preimage);
9485         check_added_monitors!(nodes[3], 0);
9486         assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
9487 }
9488
9489 /// The possible events which may trigger a `max_dust_htlc_exposure` breach
9490 #[derive(Clone, Copy, PartialEq)]
9491 enum ExposureEvent {
9492         /// Breach occurs at HTLC forwarding (see `send_htlc`)
9493         AtHTLCForward,
9494         /// Breach occurs at HTLC reception (see `update_add_htlc`)
9495         AtHTLCReception,
9496         /// Breach occurs at outbound update_fee (see `send_update_fee`)
9497         AtUpdateFeeOutbound,
9498 }
9499
9500 fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool, multiplier_dust_limit: bool) {
9501         // Test that we properly reject dust HTLC violating our `max_dust_htlc_exposure_msat`
9502         // policy.
9503         //
9504         // At HTLC forward (`send_payment()`), if the sum of the trimmed-to-dust HTLC inbound and
9505         // trimmed-to-dust HTLC outbound balance and this new payment as included on next
9506         // counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll reject the
9507         // update. At HTLC reception (`update_add_htlc()`), if the sum of the trimmed-to-dust HTLC
9508         // inbound and trimmed-to-dust HTLC outbound balance and this new received HTLC as included
9509         // on next counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll fail
9510         // the update. Note, we return a `temporary_channel_failure` (0x1000 | 7), as the channel
9511         // might be available again for HTLC processing once the dust bandwidth has cleared up.
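        //
        // For reference, an HTLC counts towards the dust exposure on a commitment transaction when its
        // value is below roughly `dust_limit_satoshis + htlc_tx_weight * feerate / 1000` (using the
        // HTLC-success or HTLC-timeout transaction weight depending on direction); the
        // `dust_*_htlc_on_holder_tx_msat` values computed below follow that formula.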
9512
9513         let chanmon_cfgs = create_chanmon_cfgs(2);
9514         let mut config = test_default_channel_config();
9515         config.channel_config.max_dust_htlc_exposure = if multiplier_dust_limit {
9516                 // Default test fee estimator rate is 253 sat/kw, so we set the multiplier to 5_000_000 / 253
9517                 // to get roughly the same initial value as the default setting when this test was
9518                 // originally written.
9519                 MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253)
9520         } else { MaxDustHTLCExposure::FixedLimitMsat(5_000_000) }; // initial default setting value
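        // (5_000_000 / 253 = 19_762, so at the default test feerate the multiplier-based limit works
        // out to 19_762 * 253 = 4_999_786 msat, just under the fixed 5_000_000 msat setting.)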
9521         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9522         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
9523         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9524
9525         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
9526         let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9527         open_channel.max_htlc_value_in_flight_msat = 50_000_000;
9528         open_channel.max_accepted_htlcs = 60;
9529         if on_holder_tx {
9530                 open_channel.dust_limit_satoshis = 546;
9531         }
9532         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
9533         let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9534         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
9535
9536         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9537
9538         let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
9539
9540         if on_holder_tx {
9541                 let mut node_0_per_peer_lock;
9542                 let mut node_0_peer_state_lock;
9543                 let mut chan = get_outbound_v1_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id);
9544                 chan.context.holder_dust_limit_satoshis = 546;
9545         }
9546
9547         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9548         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
9549         check_added_monitors!(nodes[1], 1);
9550         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9551
9552         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
9553         check_added_monitors!(nodes[0], 1);
9554         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9555
9556         let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
9557         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
9558         update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
9559
9560         // Fetch a route in advance as we will be unable to once we're unable to send.
9561         let (mut route, payment_hash, _, payment_secret) =
9562                 get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
9563
9564         let (dust_buffer_feerate, max_dust_htlc_exposure_msat) = {
9565                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
9566                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
9567                 let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
9568                 (chan.context.get_dust_buffer_feerate(None) as u64,
9569                 chan.context.get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
9570         };
9571         let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
9572         let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
9573
9574         let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
9575         let dust_inbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
9576
9577         let dust_htlc_on_counterparty_tx: u64 = 4;
9578         let dust_htlc_on_counterparty_tx_msat: u64 = max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx;
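        // The `*_htlc_on_holder_tx` counts above are the number of such just-below-dust HTLCs needed
        // to saturate `max_dust_htlc_exposure_msat`, while for the counterparty tx we instead pick a
        // per-HTLC value such that `dust_htlc_on_counterparty_tx` (4) HTLCs reach the limit.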
9579
9580         if on_holder_tx {
9581                 if dust_outbound_balance {
9582                         // Outbound dust threshold: 2223 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
9583                         // Outbound dust balance: 4372 sats
9584                         // Note, we need the sent payment to be above the outbound dust threshold on counterparty_tx of 2132 sats
9585                         for _ in 0..dust_outbound_htlc_on_holder_tx {
9586                                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_outbound_htlc_on_holder_tx_msat);
9587                                 nodes[0].node.send_payment_with_route(&route, payment_hash,
9588                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9589                         }
9590                 } else {
9591                         // Inbound dust threshold: 2324 sats (`dust_buffer_feerate` * HTLC_SUCCESS_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
9592                         // Inbound dust balance: 4372 sats
9593                         // Note, we need the sent payment to be above the outbound dust threshold on counterparty_tx of 2031 sats
9594                         for _ in 0..dust_inbound_htlc_on_holder_tx {
9595                                 route_payment(&nodes[1], &[&nodes[0]], dust_inbound_htlc_on_holder_tx_msat);
9596                         }
9597                 }
9598         } else {
9599                 if dust_outbound_balance {
9600                         // Outbound dust threshold: 2132 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
9601                         // Outbound dust balance: 5000 sats
9602                         for _ in 0..dust_htlc_on_counterparty_tx - 1 {
9603                                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_htlc_on_counterparty_tx_msat);
9604                                 nodes[0].node.send_payment_with_route(&route, payment_hash,
9605                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9606                         }
9607                 } else {
9608                         // Inbound dust threshold: 2031 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
9609                         // Inbound dust balance: 5000 sats
9610                         for _ in 0..dust_htlc_on_counterparty_tx - 1 {
9611                                 route_payment(&nodes[1], &[&nodes[0]], dust_htlc_on_counterparty_tx_msat);
9612                         }
9613                 }
9614         }
9615
9616         if exposure_breach_event == ExposureEvent::AtHTLCForward {
9617                 route.paths[0].hops.last_mut().unwrap().fee_msat =
9618                         if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 };
9619                 // With default dust exposure: 5000 sats
9620                 if on_holder_tx {
9621                         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
9622                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
9623                                 ), true, APIError::ChannelUnavailable { .. }, {});
9624                 } else {
9625                         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
9626                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
9627                                 ), true, APIError::ChannelUnavailable { .. }, {});
9628                 }
9629         } else if exposure_breach_event == ExposureEvent::AtHTLCReception {
9630                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 4 });
9631                 nodes[1].node.send_payment_with_route(&route, payment_hash,
9632                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9633                 check_added_monitors!(nodes[1], 1);
9634                 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
9635                 assert_eq!(events.len(), 1);
9636                 let payment_event = SendEvent::from_event(events.remove(0));
9637                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
9638                 // With default dust exposure: 5000 sats
9639                 if on_holder_tx {
9640                         // Outbound dust balance: 6399 sats
9641                         let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
9642                         let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
9643                         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
9644                 } else {
9645                         // Outbound dust balance: 5200 sats
9646                         nodes[0].logger.assert_log("lightning::ln::channel".to_string(),
9647                                 format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
9648                                         dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx - 1) + dust_htlc_on_counterparty_tx_msat + 4,
9649                                         max_dust_htlc_exposure_msat), 1);
9650                 }
9651         } else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
9652                 route.paths[0].hops.last_mut().unwrap().fee_msat = 2_500_000;
9653                 // For the multiplier dust exposure limit, since it scales with feerate,
9654                 // we need to add a lot of HTLCs that will become dust at the new feerate
9655                 // to cross the threshold.
9656                 for _ in 0..20 {
9657                         let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(1_000), None);
9658                         nodes[0].node.send_payment_with_route(&route, payment_hash,
9659                                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9660                 }
9661                 {
9662                         let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9663                         *feerate_lock = *feerate_lock * 10;
9664                 }
9665                 nodes[0].node.timer_tick_occurred();
9666                 check_added_monitors!(nodes[0], 1);
9667                 nodes[0].logger.assert_log_contains("lightning::ln::channel", "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure", 1);
9668         }
9669
9670         let _ = nodes[0].node.get_and_clear_pending_msg_events();
9671         let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
9672         added_monitors.clear();
9673 }
9674
9675 fn do_test_max_dust_htlc_exposure_by_threshold_type(multiplier_dust_limit: bool) {
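	// Cover every combination of commitment-transaction side, dust-exposure breach event, and
	// dust-balance direction for the given threshold type.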
9676         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit);
9677         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit);
9678         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit);
9679         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit);
9680         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit);
9681         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit);
9682         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit);
9683         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit);
9684         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit);
9685         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit);
9686         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit);
9687         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit);
9688 }
9689
9690 #[test]
9691 fn test_max_dust_htlc_exposure() {
9692         do_test_max_dust_htlc_exposure_by_threshold_type(false);
9693         do_test_max_dust_htlc_exposure_by_threshold_type(true);
9694 }
9695
9696 #[test]
9697 fn test_non_final_funding_tx() {
9698         let chanmon_cfgs = create_chanmon_cfgs(2);
9699         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9700         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9701         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9702
9703         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
9704         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9705         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
9706         let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9707         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
9708
9709         let best_height = nodes[0].node.best_block.read().unwrap().height();
9710
9711         let chan_id = *nodes[0].network_chan_count.borrow();
9712         let events = nodes[0].node.get_and_clear_pending_events();
9713         let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::Script::new(), sequence: Sequence(1), witness: Witness::from_vec(vec!(vec!(1))) };
9714         assert_eq!(events.len(), 1);
9715         let mut tx = match events[0] {
9716                 Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
9717                         // Timelock the transaction _beyond_ the best client height + 1.
9718                         Transaction { version: chan_id as i32, lock_time: PackedLockTime(best_height + 2), input: vec![input], output: vec![TxOut {
9719                                 value: *channel_value_satoshis, script_pubkey: output_script.clone(),
9720                         }]}
9721                 },
9722                 _ => panic!("Unexpected event"),
9723         };
9724         // Transaction should fail as it's evaluated as non-final for propagation.
9725         match nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()) {
9726                 Err(APIError::APIMisuseError { err }) => {
9727                         assert_eq!(format!("Funding transaction absolute timelock is non-final"), err);
9728                 },
9729                 _ => panic!()
9730         }
9731
9732 	// However, the transaction should be accepted if its locktime is no more than one block past the current best height.
9733         tx.lock_time = PackedLockTime(tx.lock_time.0 - 1);
9734         assert!(nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
9735         get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9736 }
9737
9738 #[test]
9739 fn accept_busted_but_better_fee() {
9740         // If a peer sends us a fee update that is too low, but higher than our previous channel
9741         // feerate, we should accept it. In the future we may want to consider closing the channel
9742         // later, but for now we only accept the update.
9743         let mut chanmon_cfgs = create_chanmon_cfgs(2);
9744         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9745         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9746         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9747
9748         create_chan_between_nodes(&nodes[0], &nodes[1]);
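	// The channel opens at the test harness's default feerate, well below the 5,000 sat/kW
	// nodes[1] will expect below.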
9749
9750         // Set nodes[1] to expect 5,000 sat/kW.
9751         {
9752                 let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
9753                 *feerate_lock = 5000;
9754         }
9755
9756 	// If nodes[0] increases their feerate, even if it's not enough, nodes[1] should accept it.
9757         {
9758                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9759                 *feerate_lock = 1000;
9760         }
9761         nodes[0].node.timer_tick_occurred();
9762         check_added_monitors!(nodes[0], 1);
9763
9764         let events = nodes[0].node.get_and_clear_pending_msg_events();
9765         assert_eq!(events.len(), 1);
9766         match events[0] {
9767                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
9768                         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
9769                         commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
9770                 },
9771                 _ => panic!("Unexpected event"),
9772         };
9773
9774 	// If nodes[0] increases their feerate further, even if it's not enough, nodes[1] should accept
9775         // it.
9776         {
9777                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9778                 *feerate_lock = 2000;
9779         }
9780         nodes[0].node.timer_tick_occurred();
9781         check_added_monitors!(nodes[0], 1);
9782
9783         let events = nodes[0].node.get_and_clear_pending_msg_events();
9784         assert_eq!(events.len(), 1);
9785         match events[0] {
9786                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
9787                         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
9788                         commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
9789                 },
9790                 _ => panic!("Unexpected event"),
9791         };
9792
9793         // However, if nodes[0] decreases their feerate, nodes[1] should reject it and close the
9794         // channel.
9795         {
9796                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9797                 *feerate_lock = 1000;
9798         }
9799         nodes[0].node.timer_tick_occurred();
9800         check_added_monitors!(nodes[0], 1);
9801
9802         let events = nodes[0].node.get_and_clear_pending_msg_events();
9803         assert_eq!(events.len(), 1);
9804         match events[0] {
9805                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
9806                         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
9807                         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError {
9808                                 err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000 (- 250)".to_owned() },
9809                                 [nodes[0].node.get_our_node_id()], 100000);
9810                         check_closed_broadcast!(nodes[1], true);
9811                         check_added_monitors!(nodes[1], 1);
9812                 },
9813                 _ => panic!("Unexpected event"),
9814         };
9815 }
9816
9817 fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash: bool) {
9818         let mut chanmon_cfgs = create_chanmon_cfgs(2);
9819         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9820         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9821         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9822         let min_final_cltv_expiry_delta = 120;
9823         let final_cltv_expiry_delta = if valid_delta { min_final_cltv_expiry_delta + 2 } else {
9824                 min_final_cltv_expiry_delta - 2 };
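	// Use a final CLTV expiry delta just above or just below the recipient's required minimum, so
	// the HTLC is either claimable or failed back.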
9825         let recv_value = 100_000;
9826
9827         create_chan_between_nodes(&nodes[0], &nodes[1]);
9828
9829         let payment_parameters = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), final_cltv_expiry_delta as u32);
9830         let (payment_hash, payment_preimage, payment_secret) = if use_user_hash {
9831                 let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1],
9832                         Some(recv_value), Some(min_final_cltv_expiry_delta));
9833                 (payment_hash, payment_preimage, payment_secret)
9834         } else {
9835                 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(recv_value), 7200, Some(min_final_cltv_expiry_delta)).unwrap();
9836                 (payment_hash, nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap(), payment_secret)
9837         };
9838         let route = get_route!(nodes[0], payment_parameters, recv_value).unwrap();
9839         nodes[0].node.send_payment_with_route(&route, payment_hash,
9840                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9841         check_added_monitors!(nodes[0], 1);
9842         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9843         assert_eq!(events.len(), 1);
9844         let mut payment_event = SendEvent::from_event(events.pop().unwrap());
9845         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9846         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
9847         expect_pending_htlcs_forwardable!(nodes[1]);
9848
9849         if valid_delta {
9850                 expect_payment_claimable!(nodes[1], payment_hash, payment_secret, recv_value, if use_user_hash {
9851                         None } else { Some(payment_preimage) }, nodes[1].node.get_our_node_id());
9852
9853                 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
9854         } else {
9855                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
9856
9857                 check_added_monitors!(nodes[1], 1);
9858
9859                 let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9860                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates.update_fail_htlcs[0]);
9861                 commitment_signed_dance!(nodes[0], nodes[1], fail_updates.commitment_signed, false, true);
9862
9863                 expect_payment_failed!(nodes[0], payment_hash, true);
9864         }
9865 }
9866
9867 #[test]
9868 fn test_payment_with_custom_min_cltv_expiry_delta() {
9869         do_payment_with_custom_min_final_cltv_expiry(false, false);
9870         do_payment_with_custom_min_final_cltv_expiry(false, true);
9871         do_payment_with_custom_min_final_cltv_expiry(true, false);
9872         do_payment_with_custom_min_final_cltv_expiry(true, true);
9873 }
9874
9875 #[test]
9876 fn test_disconnects_peer_awaiting_response_ticks() {
9877 	// Tests that nodes awaiting a response critical to channel responsiveness
9878         // disconnect their counterparty after `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
9879         let mut chanmon_cfgs = create_chanmon_cfgs(2);
9880         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9881         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9882         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9883
9884         // Asserts a disconnect event is queued to the user.
9885         let check_disconnect_event = |node: &Node, should_disconnect: bool| {
9886                 let disconnect_event = node.node.get_and_clear_pending_msg_events().iter().find_map(|event|
9887                         if let MessageSendEvent::HandleError { action, .. } = event {
9888                                 if let msgs::ErrorAction::DisconnectPeerWithWarning { .. } = action {
9889                                         Some(())
9890                                 } else {
9891                                         None
9892                                 }
9893                         } else {
9894                                 None
9895                         }
9896                 );
9897                 assert_eq!(disconnect_event.is_some(), should_disconnect);
9898         };
9899
9900         // Fires timer ticks ensuring we only attempt to disconnect peers after reaching
9901         // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
9902         let check_disconnect = |node: &Node| {
9903                 // No disconnect without any timer ticks.
9904                 check_disconnect_event(node, false);
9905
9906 		// No disconnect while we remain one timer tick short of the required count.
9907                 for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS - 1 {
9908                         node.node.timer_tick_occurred();
9909                         check_disconnect_event(node, false);
9910                 }
9911
9912                 // Disconnect after reaching the required ticks.
9913                 node.node.timer_tick_occurred();
9914                 check_disconnect_event(node, true);
9915
9916                 // Disconnect again on the next tick if the peer hasn't been disconnected yet.
9917                 node.node.timer_tick_occurred();
9918                 check_disconnect_event(node, true);
9919         };
9920
9921         create_chan_between_nodes(&nodes[0], &nodes[1]);
9922
9923         // We'll start by performing a fee update with Alice (nodes[0]) on the channel.
9924         *nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2;
9925         nodes[0].node.timer_tick_occurred();
9926         check_added_monitors!(&nodes[0], 1);
9927         let alice_fee_update = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
9928         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), alice_fee_update.update_fee.as_ref().unwrap());
9929         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &alice_fee_update.commitment_signed);
9930         check_added_monitors!(&nodes[1], 1);
9931
9932         // This will prompt Bob (nodes[1]) to respond with his `CommitmentSigned` and `RevokeAndACK`.
9933         let (bob_revoke_and_ack, bob_commitment_signed) = get_revoke_commit_msgs!(&nodes[1], nodes[0].node.get_our_node_id());
9934         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revoke_and_ack);
9935         check_added_monitors!(&nodes[0], 1);
9936         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_commitment_signed);
9937         check_added_monitors(&nodes[0], 1);
9938
9939         // Alice then needs to send her final `RevokeAndACK` to complete the commitment dance. We
9940         // pretend Bob hasn't received the message and check whether he'll disconnect Alice after
9941         // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
9942         let alice_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
9943         check_disconnect(&nodes[1]);
9944
9945         // Now, we'll reconnect them to test awaiting a `ChannelReestablish` message.
9946         //
9947         // Note that since the commitment dance didn't complete above, Alice is expected to resend her
9948         // final `RevokeAndACK` to Bob to complete it.
9949         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
9950         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
9951         let bob_init = msgs::Init {
9952                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
9953         };
9954         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &bob_init, true).unwrap();
9955         let alice_init = msgs::Init {
9956                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
9957         };
9958         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &alice_init, true).unwrap();
9959
9960         // Upon reconnection, Alice sends her `ChannelReestablish` to Bob. Alice, however, hasn't
9961         // received Bob's yet, so she should disconnect him after reaching
9962         // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
9963         let alice_channel_reestablish = get_event_msg!(
9964                 nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()
9965         );
9966         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &alice_channel_reestablish);
9967         check_disconnect(&nodes[0]);
9968
9969         // Bob now sends his `ChannelReestablish` to Alice to resume the channel and consider it "live".
9970         let bob_channel_reestablish = nodes[1].node.get_and_clear_pending_msg_events().iter().find_map(|event|
9971                 if let MessageSendEvent::SendChannelReestablish { node_id, msg } = event {
9972                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
9973                         Some(msg.clone())
9974                 } else {
9975                         None
9976                 }
9977         ).unwrap();
9978         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bob_channel_reestablish);
9979
9980         // Sanity check that Alice won't disconnect Bob since she's no longer waiting for any messages.
9981         for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
9982                 nodes[0].node.timer_tick_occurred();
9983                 check_disconnect_event(&nodes[0], false);
9984         }
9985
9986         // However, Bob is still waiting on Alice's `RevokeAndACK`, so he should disconnect her after
9987         // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
9988         check_disconnect(&nodes[1]);
9989
9990         // Finally, have Bob process the last message.
9991         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &alice_revoke_and_ack);
9992         check_added_monitors(&nodes[1], 1);
9993
9994         // At this point, neither node should attempt to disconnect each other, since they aren't
9995         // waiting on any messages.
9996         for node in &nodes {
9997                 for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
9998                         node.node.timer_tick_occurred();
9999                         check_disconnect_event(node, false);
10000                 }
10001         }
10002 }
10003
10004 #[test]
10005 fn test_remove_expired_outbound_unfunded_channels() {
10006         let chanmon_cfgs = create_chanmon_cfgs(2);
10007         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10008         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10009         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10010
10011         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
10012         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10013         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10014         let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10015         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10016
10017         let events = nodes[0].node.get_and_clear_pending_events();
10018         assert_eq!(events.len(), 1);
10019         match events[0] {
10020                 Event::FundingGenerationReady { .. } => (),
10021                 _ => panic!("Unexpected event"),
10022         };
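	// Intentionally never provide a funding transaction: the channel must remain unfunded for the
	// age-limit timer to force-close it below.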
10023
10024 	// Asserts the outbound channel has been removed from nodes[0]'s peer state map.
10025         let check_outbound_channel_existence = |should_exist: bool| {
10026                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
10027                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
10028                 assert_eq!(chan_lock.outbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
10029         };
10030
10031         // Channel should exist without any timer ticks.
10032         check_outbound_channel_existence(true);
10033
10034 	// The channel should still exist while one timer tick short of the required count.
10035         for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
10036                 nodes[0].node.timer_tick_occurred();
10037 		check_outbound_channel_existence(true);
10038         }
10039
10040         // Remove channel after reaching the required ticks.
10041         nodes[0].node.timer_tick_occurred();
10042         check_outbound_channel_existence(false);
10043
10044         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
10045         assert_eq!(msg_events.len(), 1);
10046         match msg_events[0] {
10047                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
10048                         assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
10049                 },
10050                 _ => panic!("Unexpected event"),
10051         }
10052         check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
10053 }
10054
10055 #[test]
10056 fn test_remove_expired_inbound_unfunded_channels() {
10057         let chanmon_cfgs = create_chanmon_cfgs(2);
10058         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10059         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10060         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10061
10062         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
10063         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10064         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10065         let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10066         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10067
10068         let events = nodes[0].node.get_and_clear_pending_events();
10069         assert_eq!(events.len(), 1);
10070         match events[0] {
10071                 Event::FundingGenerationReady { .. } => (),
10072                 _ => panic!("Unexpected event"),
10073         };
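	// As above, the funding transaction is never provided, so nodes[1]'s inbound channel remains
	// unfunded and subject to the age limit.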
10074
10075 	// Asserts the inbound channel has been removed from nodes[1]'s peer state map.
10076         let check_inbound_channel_existence = |should_exist: bool| {
10077                 let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
10078                 let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
10079                 assert_eq!(chan_lock.inbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
10080         };
10081
10082         // Channel should exist without any timer ticks.
10083         check_inbound_channel_existence(true);
10084
10085 	// The channel should still exist while one timer tick short of the required count.
10086         for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
10087                 nodes[1].node.timer_tick_occurred();
10088 		check_inbound_channel_existence(true);
10089         }
10090
10091         // Remove channel after reaching the required ticks.
10092         nodes[1].node.timer_tick_occurred();
10093         check_inbound_channel_existence(false);
10094
10095         let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
10096         assert_eq!(msg_events.len(), 1);
10097         match msg_events[0] {
10098                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
10099                         assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
10100                 },
10101                 _ => panic!("Unexpected event"),
10102         }
10103         check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
10104 }
10105
10106 fn do_test_multi_post_event_actions(do_reload: bool) {
10107         // Tests handling multiple post-Event actions at once.
10108         // There is specific code in ChannelManager to handle channels where multiple post-Event
10109         // `ChannelMonitorUpdates` are pending at once. This test exercises that code.
10110         //
10111         // Specifically, we test calling `get_and_clear_pending_events` while there are two
10112         // PaymentSents from different channels and one channel has two pending `ChannelMonitorUpdate`s
10113         // - one from an RAA and one from an inbound commitment_signed.
10114         let chanmon_cfgs = create_chanmon_cfgs(3);
10115         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10116         let (persister, chain_monitor);
10117         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
10118         let nodes_0_deserialized;
10119         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10120
10121         let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
10122         let chan_id_2 = create_announced_chan_between_nodes(&nodes, 0, 2).2;
10123
10124         send_payment(&nodes[0], &[&nodes[1]], 1_000_000);
10125         send_payment(&nodes[0], &[&nodes[2]], 1_000_000);
10126
10127         let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
10128         let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000);
10129
10130         nodes[1].node.claim_funds(our_payment_preimage);
10131         check_added_monitors!(nodes[1], 1);
10132         expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);
10133
10134         nodes[2].node.claim_funds(payment_preimage_2);
10135         check_added_monitors!(nodes[2], 1);
10136         expect_payment_claimed!(nodes[2], payment_hash_2, 1_000_000);
10137
10138         for dest in &[1, 2] {
10139                 let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[*dest], nodes[0].node.get_our_node_id());
10140                 nodes[0].node.handle_update_fulfill_htlc(&nodes[*dest].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
10141                 commitment_signed_dance!(nodes[0], nodes[*dest], htlc_fulfill_updates.commitment_signed, false);
10142                 check_added_monitors(&nodes[0], 0);
10143         }
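	// At this point no further monitor updates have been added on nodes[0]: the remaining updates
	// are held as post-event actions and only released once the pending events are processed below.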
10144
10145         let (route, payment_hash_3, _, payment_secret_3) =
10146                 get_route_and_payment_hash!(nodes[1], nodes[0], 100_000);
10147         let payment_id = PaymentId(payment_hash_3.0);
10148         nodes[1].node.send_payment_with_route(&route, payment_hash_3,
10149                 RecipientOnionFields::secret_only(payment_secret_3), payment_id).unwrap();
10150         check_added_monitors(&nodes[1], 1);
10151
10152         let send_event = SendEvent::from_node(&nodes[1]);
10153         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
10154         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event.commitment_msg);
10155         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
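	// nodes[0] does not respond yet; its RAA + CS for this update is blocked behind the post-event
	// monitor updates and is collected at the end of the test.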
10156
10157         if do_reload {
10158                 let nodes_0_serialized = nodes[0].node.encode();
10159                 let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
10160                 let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_2).encode();
10161                 reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, chain_monitor, nodes_0_deserialized);
10162
10163                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10164                 nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10165
10166                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
10167                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[2]));
10168         }
10169
10170         let events = nodes[0].node.get_and_clear_pending_events();
10171         assert_eq!(events.len(), 4);
10172         if let Event::PaymentSent { payment_preimage, .. } = events[0] {
10173                 assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
10174         } else { panic!(); }
10175         if let Event::PaymentSent { payment_preimage, .. } = events[1] {
10176                 assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
10177         } else { panic!(); }
10178         if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
10179         if let Event::PaymentPathSuccessful { .. } = events[3] {} else { panic!(); }
10180
10181         // After the events are processed, the ChannelMonitorUpdates will be released and, upon their
10182         // completion, we'll respond to nodes[1] with an RAA + CS.
10183         get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
10184         check_added_monitors(&nodes[0], 3);
10185 }
10186
10187 #[test]
10188 fn test_multi_post_event_actions() {
10189         do_test_multi_post_event_actions(true);
10190         do_test_multi_post_event_actions(false);
10191 }