Move keysend tests to payment_tests.rs
[rust-lightning] / lightning / src / ln / functional_tests.rs
1 // This file is Copyright its original authors, visible in version control
2 // history.
3 //
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
8 // licenses.
9
10 //! Tests that test standing up a network of ChannelManagers, creating channels, sending
11 //! payments/messages between them, and often checking the resulting ChannelMonitors are able to
12 //! claim outputs on-chain.
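//!
//! Most tests here follow the same basic scaffold, built from the helpers in
//! `functional_test_utils` (a minimal sketch of the pattern used throughout this file):
//!
//! ```ignore
//! let chanmon_cfgs = create_chanmon_cfgs(2);
//! let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
//! let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
//! let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
//! create_announced_chan_between_nodes(&nodes, 0, 1);
//! send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
//! ```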
13
14 use crate::chain;
15 use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
16 use crate::chain::chaininterface::LowerBoundedFeeEstimator;
17 use crate::chain::channelmonitor;
18 use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
19 use crate::chain::transaction::OutPoint;
20 use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource};
21 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
22 use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash};
23 use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel};
24 use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
25 use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
26 use crate::ln::{chan_utils, onion_utils};
27 use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
28 use crate::routing::gossip::{NetworkGraph, NetworkUpdate};
29 use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route};
30 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
31 use crate::ln::msgs;
32 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
33 use crate::util::enforcing_trait_impls::EnforcingSigner;
34 use crate::util::test_utils;
35 use crate::util::errors::APIError;
36 use crate::util::ser::{Writeable, ReadableArgs};
37 use crate::util::string::UntrustedString;
38 use crate::util::config::{UserConfig, MaxDustHTLCExposure};
39
40 use bitcoin::hash_types::BlockHash;
41 use bitcoin::blockdata::script::{Builder, Script};
42 use bitcoin::blockdata::opcodes;
43 use bitcoin::blockdata::constants::genesis_block;
44 use bitcoin::network::constants::Network;
45 use bitcoin::{PackedLockTime, Sequence, Transaction, TxIn, TxOut, Witness};
46 use bitcoin::OutPoint as BitcoinOutPoint;
47
48 use bitcoin::secp256k1::Secp256k1;
49 use bitcoin::secp256k1::{PublicKey,SecretKey};
50
51 use regex;
52
53 use crate::io;
54 use crate::prelude::*;
55 use alloc::collections::BTreeSet;
56 use core::default::Default;
57 use core::iter::repeat;
58 use bitcoin::hashes::Hash;
59 use crate::sync::{Arc, Mutex};
60
61 use crate::ln::functional_test_utils::*;
62 use crate::ln::chan_utils::CommitmentTransaction;
63
64 use super::channel::UNFUNDED_CHANNEL_AGE_LIMIT_TICKS;
65
66 #[test]
67 fn test_insane_channel_opens() {
68         // Stand up a network of 2 nodes
69         use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS;
70         let mut cfg = UserConfig::default();
71         cfg.channel_handshake_limits.max_funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1;
72         let chanmon_cfgs = create_chanmon_cfgs(2);
73         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
74         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(cfg)]);
75         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
76
77         // Instantiate channel parameters where we push the maximum msats given our
78         // funding satoshis
79         let channel_value_sat = 31337; // same as funding satoshis
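        // The peer rejects any push_msat above (funding - its required reserve) * 1000, so compute
        // exactly that maximum here.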
80         let channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg);
81         let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
82
83         // Have node0 initiate a channel to node1 with aforementioned parameters
84         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_sat, push_msat, 42, None).unwrap();
85
86         // Extract the channel open message from node0 to node1
87         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
88
89         // Test helper that asserts we get the correct error string given a mutator
90         // that supposedly makes the channel open message insane
91         let insane_open_helper = |expected_error_str: &str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
92                 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &message_mutator(open_channel_message.clone()));
93                 let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
94                 assert_eq!(msg_events.len(), 1);
95                 let expected_regex = regex::Regex::new(expected_error_str).unwrap();
96                 if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
97                         match action {
98                                 &ErrorAction::SendErrorMessage { .. } => {
99                                         nodes[1].logger.assert_log_regex("lightning::ln::channelmanager", expected_regex, 1);
100                                 },
101                                 _ => panic!("unexpected event!"),
102                         }
103                 } else { assert!(false); }
104         };
105
106         use crate::ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;
107
108         // Test all mutations that would make the channel open message insane
109         insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
110         insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });
111
112         insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.funding_satoshis + 1; msg });
113
114         insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
115
116         insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1 ; msg });
117
118         insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });
119
120         insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });
121
122         insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.max_accepted_htlcs = 0; msg });
123
124         insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.max_accepted_htlcs = 484; msg });
125 }
126
127 #[test]
128 fn test_funding_exceeds_no_wumbo_limit() {
129         // Test that if a peer does not support wumbo channels, we'll refuse to open a wumbo channel to
130         // them.
131         use crate::ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO;
132         let chanmon_cfgs = create_chanmon_cfgs(2);
133         let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
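        // Strip the wumbo (option_support_large_channel) feature from nodes[1]'s advertised init
        // features so nodes[0] sees a peer that cannot accept large channels.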
134         *node_cfgs[1].override_init_features.borrow_mut() = Some(channelmanager::provided_init_features(&test_default_channel_config()).clear_wumbo());
135         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
136         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
137
138         match nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None) {
139                 Err(APIError::APIMisuseError { err }) => {
140                         assert_eq!(format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1), err);
141                 },
142                 _ => panic!()
143         }
144 }
145
146 fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
147         // A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure,
148         // but only for them. Because some LSPs do it with some level of trust of the clients (for a
149         // substantial UX improvement), we explicitly allow it. Because it's unlikely to happen often
150         // in normal testing, we test it explicitly here.
151         let chanmon_cfgs = create_chanmon_cfgs(2);
152         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
153         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
154         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
155         let default_config = UserConfig::default();
156
157         // Have node0 initiate a channel to node1 with aforementioned parameters
158         let mut push_amt = 100_000_000;
159         let feerate_per_kw = 253;
160         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
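        // Leave node0 with only enough to cover the commitment-tx fee (with a buffer of 4 HTLCs at
        // the current feerate) plus its channel reserve; everything else is made available to node1.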
161         push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(&channel_type_features) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
162         push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
163
164         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None).unwrap();
165         let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
166         if !send_from_initiator {
167                 open_channel_message.channel_reserve_satoshis = 0;
168                 open_channel_message.max_htlc_value_in_flight_msat = 100_000_000;
169         }
170         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
171
172         // Extract the channel accept message from node1 to node0
173         let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
174         if send_from_initiator {
175                 accept_channel_message.channel_reserve_satoshis = 0;
176                 accept_channel_message.max_htlc_value_in_flight_msat = 100_000_000;
177         }
178         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
179         {
180                 let sender_node = if send_from_initiator { &nodes[1] } else { &nodes[0] };
181                 let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
182                 let mut sender_node_per_peer_lock;
183                 let mut sender_node_peer_state_lock;
184                 if send_from_initiator {
185                         let chan = get_inbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
186                         chan.context.holder_selected_channel_reserve_satoshis = 0;
187                         chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
188                 } else {
189                         let chan = get_outbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
190                         chan.context.holder_selected_channel_reserve_satoshis = 0;
191                         chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
192                 }
193         }
194
195         let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id);
196         let funding_msgs = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx);
197         create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_msgs.0);
198
199         // nodes[0] should now be able to send the full balance to nodes[1], violating nodes[1]'s
200         // security model if it ever tries to send funds back to nodes[0] (but that's not our problem).
201         if send_from_initiator {
202                 send_payment(&nodes[0], &[&nodes[1]], 100_000_000
203                         // Note that for outbound channels we have to consider the commitment tx fee and the
204                         // "fee spike buffer", which is currently a multiple of the total commitment tx fee as
205                         // well as an additional HTLC.
206                         - FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, &channel_type_features));
207         } else {
208                 send_payment(&nodes[1], &[&nodes[0]], push_amt);
209         }
210 }
211
212 #[test]
213 fn test_counterparty_no_reserve() {
214         do_test_counterparty_no_reserve(true);
215         do_test_counterparty_no_reserve(false);
216 }
217
218 #[test]
219 fn test_async_inbound_update_fee() {
220         let chanmon_cfgs = create_chanmon_cfgs(2);
221         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
222         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
223         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
224         create_announced_chan_between_nodes(&nodes, 0, 1);
225
226         // balancing
227         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
228
229         // A                                        B
230         // update_fee                            ->
231         // send (1) commitment_signed            -.
232         //                                       <- update_add_htlc/commitment_signed
233         // send (2) RAA (awaiting remote revoke) -.
234         // (1) commitment_signed is delivered    ->
235         //                                       .- send (3) RAA (awaiting remote revoke)
236         // (2) RAA is delivered                  ->
237         //                                       .- send (4) commitment_signed
238         //                                       <- (3) RAA is delivered
239         // send (5) commitment_signed            -.
240         //                                       <- (4) commitment_signed is delivered
241         // send (6) RAA                          -.
242         // (5) commitment_signed is delivered    ->
243         //                                       <- RAA
244         // (6) RAA is delivered                  ->
245
246         // First nodes[0] generates an update_fee
247         {
248                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
249                 *feerate_lock += 20;
250         }
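        // A timer tick makes nodes[0] pick up the new feerate estimate and queue an
        // update_fee + commitment_signed for the channel.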
251         nodes[0].node.timer_tick_occurred();
252         check_added_monitors!(nodes[0], 1);
253
254         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
255         assert_eq!(events_0.len(), 1);
256         let (update_msg, commitment_signed) = match events_0[0] { // (1)
257                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
258                         (update_fee.as_ref(), commitment_signed)
259                 },
260                 _ => panic!("Unexpected event"),
261         };
262
263         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
264
265         // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
266         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000);
267         nodes[1].node.send_payment_with_route(&route, our_payment_hash,
268                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
269         check_added_monitors!(nodes[1], 1);
270
271         let payment_event = {
272                 let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
273                 assert_eq!(events_1.len(), 1);
274                 SendEvent::from_event(events_1.remove(0))
275         };
276         assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
277         assert_eq!(payment_event.msgs.len(), 1);
278
279         // ...now when the messages get delivered everyone should be happy
280         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
281         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
282         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
283         // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
284         check_added_monitors!(nodes[0], 1);
285
286         // deliver (1), generate (3):
287         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
288         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
289         // nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes
290         check_added_monitors!(nodes[1], 1);
291
292         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack); // deliver (2)
293         let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
294         assert!(bs_update.update_add_htlcs.is_empty()); // (4)
295         assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4)
296         assert!(bs_update.update_fail_htlcs.is_empty()); // (4)
297         assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4)
298         assert!(bs_update.update_fee.is_none()); // (4)
299         check_added_monitors!(nodes[1], 1);
300
301         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); // deliver (3)
302         let as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
303         assert!(as_update.update_add_htlcs.is_empty()); // (5)
304         assert!(as_update.update_fulfill_htlcs.is_empty()); // (5)
305         assert!(as_update.update_fail_htlcs.is_empty()); // (5)
306         assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5)
307         assert!(as_update.update_fee.is_none()); // (5)
308         check_added_monitors!(nodes[0], 1);
309
310         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed); // deliver (4)
311         let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
312         // only (6) so get_event_msg's assert(len == 1) passes
313         check_added_monitors!(nodes[0], 1);
314
315         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.commitment_signed); // deliver (5)
316         let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
317         check_added_monitors!(nodes[1], 1);
318
319         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
320         check_added_monitors!(nodes[0], 1);
321
322         let events_2 = nodes[0].node.get_and_clear_pending_events();
323         assert_eq!(events_2.len(), 1);
324         match events_2[0] {
325                 Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment
326                 _ => panic!("Unexpected event"),
327         }
328
329         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke); // deliver (6)
330         check_added_monitors!(nodes[1], 1);
331 }
332
333 #[test]
334 fn test_update_fee_unordered_raa() {
335         // Just the intro to the previous test followed by an out-of-order RAA (which caused a
336         // crash in an earlier version of the update_fee patch)
337         let chanmon_cfgs = create_chanmon_cfgs(2);
338         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
339         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
340         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
341         create_announced_chan_between_nodes(&nodes, 0, 1);
342
343         // balancing
344         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
345
346         // First nodes[0] generates an update_fee
347         {
348                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
349                 *feerate_lock += 20;
350         }
351         nodes[0].node.timer_tick_occurred();
352         check_added_monitors!(nodes[0], 1);
353
354         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
355         assert_eq!(events_0.len(), 1);
356         let update_msg = match events_0[0] { // (1)
357                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
358                         update_fee.as_ref()
359                 },
360                 _ => panic!("Unexpected event"),
361         };
362
363         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
364
365         // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
366         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000);
367         nodes[1].node.send_payment_with_route(&route, our_payment_hash,
368                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
369         check_added_monitors!(nodes[1], 1);
370
371         let payment_event = {
372                 let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
373                 assert_eq!(events_1.len(), 1);
374                 SendEvent::from_event(events_1.remove(0))
375         };
376         assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
377         assert_eq!(payment_event.msgs.len(), 1);
378
379         // ...now when the messages get delivered everyone should be happy
380         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
381         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
382         let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
383         // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
384         check_added_monitors!(nodes[0], 1);
385
386         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg); // deliver (2)
387         check_added_monitors!(nodes[1], 1);
388
389         // We can't continue, sadly, because our (1) now has a bogus signature
390 }
391
392 #[test]
393 fn test_multi_flight_update_fee() {
394         let chanmon_cfgs = create_chanmon_cfgs(2);
395         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
396         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
397         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
398         create_announced_chan_between_nodes(&nodes, 0, 1);
399
400         // A                                        B
401         // update_fee/commitment_signed          ->
402         //                                       .- send (1) RAA and (2) commitment_signed
403         // update_fee (never committed)          ->
404         // (3) update_fee                        ->
405         // We have to manually generate the above update_fee: it is allowed by the protocol, but we
406         // don't track which updates correspond to which revoke_and_ack responses, so we're in
407         // AwaitingRAA mode and will not generate the update_fee yet.
408         //                                       <- (1) RAA delivered
409         // (3) is generated and send (4) CS      -.
410         // Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't
411         // know the per_commitment_point to use for it.
412         //                                       <- (2) commitment_signed delivered
413         // revoke_and_ack                        ->
414         //                                          B should send no response here
415         // (4) commitment_signed delivered       ->
416         //                                       <- RAA/commitment_signed delivered
417         // revoke_and_ack                        ->
418
419         // First nodes[0] generates an update_fee
420         let initial_feerate;
421         {
422                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
423                 initial_feerate = *feerate_lock;
424                 *feerate_lock = initial_feerate + 20;
425         }
426         nodes[0].node.timer_tick_occurred();
427         check_added_monitors!(nodes[0], 1);
428
429         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
430         assert_eq!(events_0.len(), 1);
431         let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1)
432                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
433                         (update_fee.as_ref().unwrap(), commitment_signed)
434                 },
435                 _ => panic!("Unexpected event"),
436         };
437
438         // Deliver first update_fee/commitment_signed pair, generating (1) and (2):
439         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1);
440         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1);
441         let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
442         check_added_monitors!(nodes[1], 1);
443
444         // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment
445         // transaction:
446         {
447                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
448                 *feerate_lock = initial_feerate + 40;
449         }
450         nodes[0].node.timer_tick_occurred();
451         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
452         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
453
454         // Create the (3) update_fee message that nodes[0] will generate before it does...
455         let mut update_msg_2 = msgs::UpdateFee {
456                 channel_id: update_msg_1.channel_id.clone(),
457                 feerate_per_kw: (initial_feerate + 30) as u32,
458         };
459
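        // Deliver the never-committed update_fee (at initial_feerate + 30); it is immediately
        // superseded by (3) at initial_feerate + 40 below.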
460         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);
461
462         update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32;
463         // Deliver (3)
464         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);
465
466         // Deliver (1), generating (3) and (4)
467         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg);
468         let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
469         check_added_monitors!(nodes[0], 1);
470         assert!(as_second_update.update_add_htlcs.is_empty());
471         assert!(as_second_update.update_fulfill_htlcs.is_empty());
472         assert!(as_second_update.update_fail_htlcs.is_empty());
473         assert!(as_second_update.update_fail_malformed_htlcs.is_empty());
474         // Check that the update_fee newly generated matches what we delivered:
475         assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id);
476         assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw);
477
478         // Deliver (2) commitment_signed
479         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
480         let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
481         check_added_monitors!(nodes[0], 1);
482         // No commitment_signed so get_event_msg's assert(len == 1) passes
483
484         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg);
485         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
486         check_added_monitors!(nodes[1], 1);
487
488         // Deliver (4)
489         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
490         let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
491         check_added_monitors!(nodes[1], 1);
492
493         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
494         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
495         check_added_monitors!(nodes[0], 1);
496
497         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment);
498         let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
499         // No commitment_signed so get_event_msg's assert(len == 1) passes
500         check_added_monitors!(nodes[0], 1);
501
502         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke);
503         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
504         check_added_monitors!(nodes[1], 1);
505 }
506
507 fn do_test_sanity_on_in_flight_opens(steps: u8) {
508         // Previously, we had issues deserializing channels when we hadn't connected the first block
509         // after creation. To catch that and similar issues, we lean on the Node::drop impl to test
510         // serialization round-trips and simply do steps towards opening a channel and then drop the
511         // Node objects.
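        //
        // The low nibble of `steps` selects how far through the channel-open flow we get before
        // dropping the nodes; the high bit additionally connects a dummy block first.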
512
513         let chanmon_cfgs = create_chanmon_cfgs(2);
514         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
515         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
516         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
517
518         if steps & 0b1000_0000 != 0 {
519                 let block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
520                 connect_block(&nodes[0], &block);
521                 connect_block(&nodes[1], &block);
522         }
523
524         if steps & 0x0f == 0 { return; }
525         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
526         let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
527
528         if steps & 0x0f == 1 { return; }
529         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
530         let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
531
532         if steps & 0x0f == 2 { return; }
533         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
534
535         let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
536
537         if steps & 0x0f == 3 { return; }
538         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
539         check_added_monitors!(nodes[0], 0);
540         let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
541
542         if steps & 0x0f == 4 { return; }
543         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
544         {
545                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
546                 assert_eq!(added_monitors.len(), 1);
547                 assert_eq!(added_monitors[0].0, funding_output);
548                 added_monitors.clear();
549         }
550         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
551
552         let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
553
554         if steps & 0x0f == 5 { return; }
555         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
556         {
557                 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
558                 assert_eq!(added_monitors.len(), 1);
559                 assert_eq!(added_monitors[0].0, funding_output);
560                 added_monitors.clear();
561         }
562
563         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
564         let events_4 = nodes[0].node.get_and_clear_pending_events();
565         assert_eq!(events_4.len(), 0);
566
567         if steps & 0x0f == 6 { return; }
568         create_chan_between_nodes_with_value_confirm_first(&nodes[0], &nodes[1], &tx, 2);
569
570         if steps & 0x0f == 7 { return; }
571         confirm_transaction_at(&nodes[0], &tx, 2);
572         connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
573         create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
574         expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
575 }
576
577 #[test]
578 fn test_sanity_on_in_flight_opens() {
579         do_test_sanity_on_in_flight_opens(0);
580         do_test_sanity_on_in_flight_opens(0 | 0b1000_0000);
581         do_test_sanity_on_in_flight_opens(1);
582         do_test_sanity_on_in_flight_opens(1 | 0b1000_0000);
583         do_test_sanity_on_in_flight_opens(2);
584         do_test_sanity_on_in_flight_opens(2 | 0b1000_0000);
585         do_test_sanity_on_in_flight_opens(3);
586         do_test_sanity_on_in_flight_opens(3 | 0b1000_0000);
587         do_test_sanity_on_in_flight_opens(4);
588         do_test_sanity_on_in_flight_opens(4 | 0b1000_0000);
589         do_test_sanity_on_in_flight_opens(5);
590         do_test_sanity_on_in_flight_opens(5 | 0b1000_0000);
591         do_test_sanity_on_in_flight_opens(6);
592         do_test_sanity_on_in_flight_opens(6 | 0b1000_0000);
593         do_test_sanity_on_in_flight_opens(7);
594         do_test_sanity_on_in_flight_opens(7 | 0b1000_0000);
595         do_test_sanity_on_in_flight_opens(8);
596         do_test_sanity_on_in_flight_opens(8 | 0b1000_0000);
597 }
598
599 #[test]
600 fn test_update_fee_vanilla() {
601         let chanmon_cfgs = create_chanmon_cfgs(2);
602         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
603         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
604         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
605         create_announced_chan_between_nodes(&nodes, 0, 1);
606
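        // Bump the feerate and let a timer tick generate update_fee + commitment_signed, then walk
        // both nodes through the full commitment_signed/revoke_and_ack dance for a fee-only update.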
607         {
608                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
609                 *feerate_lock += 25;
610         }
611         nodes[0].node.timer_tick_occurred();
612         check_added_monitors!(nodes[0], 1);
613
614         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
615         assert_eq!(events_0.len(), 1);
616         let (update_msg, commitment_signed) = match events_0[0] {
617                 MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
618                         (update_fee.as_ref(), commitment_signed)
619                 },
620                 _ => panic!("Unexpected event"),
621         };
622         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
623
624         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
625         let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
626         check_added_monitors!(nodes[1], 1);
627
628         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
629         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
630         check_added_monitors!(nodes[0], 1);
631
632         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
633         let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
634         // No commitment_signed so get_event_msg's assert(len == 1) passes
635         check_added_monitors!(nodes[0], 1);
636
637         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
638         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
639         check_added_monitors!(nodes[1], 1);
640 }
641
642 #[test]
643 fn test_update_fee_that_funder_cannot_afford() {
644         let chanmon_cfgs = create_chanmon_cfgs(2);
645         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
646         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
647         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
648         let channel_value = 5000;
649         let push_sats = 700;
650         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000);
651         let channel_id = chan.2;
652         let secp_ctx = Secp256k1::new();
653         let default_config = UserConfig::default();
654         let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);
655
656         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
657
658         // Calculate the maximum feerate that A can afford. Note that we won't send an update_fee
659         // unless we can still afford the commitment fee with CONCURRENT_INBOUND_HTLC_FEE_BUFFER extra
660         // HTLCs included, so we calculate two different feerates here - the expected local limit as
661         // well as the expected remote limit.
662         let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(&channel_type_features) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32;
663         let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(&channel_type_features)) as u32;
664         {
665                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
666                 *feerate_lock = feerate;
667         }
668         nodes[0].node.timer_tick_occurred();
669         check_added_monitors!(nodes[0], 1);
670         let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
671
672         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg.update_fee.unwrap());
673
674         commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false);
675
676         // Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate set above.
677         {
678                 let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone();
679
680                 // We made sure neither party's funds are below the dust limit and there are no HTLCs here
681                 assert_eq!(commitment_tx.output.len(), 2);
682                 let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000;
683                 let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value);
684                 actual_fee = channel_value - actual_fee;
685                 assert_eq!(total_fee, actual_fee);
686         }
687
688         {
689                 // Increment the feerate by a small constant, accounting for rounding errors
690                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
691                 *feerate_lock += 4;
692         }
693         nodes[0].node.timer_tick_occurred();
694         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
695         check_added_monitors!(nodes[0], 0);
696
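        // (1 << 48) - 2: commitment numbers count down from an initial value of (1 << 48) - 1.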
697         const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;
698
699         // Get the EnforcingSigner for each channel, which will be used to (1) get the keys
700         // needed to sign the new commitment tx and (2) sign the new commitment tx.
701         let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = {
702                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
703                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
704                 let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
705                 let chan_signer = local_chan.get_signer();
706                 let pubkeys = chan_signer.pubkeys();
707                 (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
708                  pubkeys.funding_pubkey)
709         };
710         let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
711                 let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
712                 let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
713                 let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
714                 let chan_signer = remote_chan.get_signer();
715                 let pubkeys = chan_signer.pubkeys();
716                 (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
717                  chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
718                  pubkeys.funding_pubkey)
719         };
720
721         // Assemble the set of keys we can use for signatures for our commitment_signed message.
722         let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
723                 &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);
724
725         let res = {
726                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
727                 let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
728                 let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap();
729                 let local_chan_signer = local_chan.get_signer();
730                 let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![];
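                // Build nodes[1]'s (counterparty-broadcastable) commitment tx by hand at the new,
                // higher feerate: nodes[1] keeps push_sats while nodes[0], the funder, pays the fee
                // out of its remaining balance - more than it can actually afford.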
731                 let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
732                         INITIAL_COMMITMENT_NUMBER - 1,
733                         push_sats,
734                         channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000,
735                         local_funding, remote_funding,
736                         commit_tx_keys.clone(),
737                         non_buffer_feerate + 4,
738                         &mut htlcs,
739                         &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
740                 );
741                 local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
742         };
743
744         let commit_signed_msg = msgs::CommitmentSigned {
745                 channel_id: chan.2,
746                 signature: res.0,
747                 htlc_signatures: res.1,
748                 #[cfg(taproot)]
749                 partial_signature_with_nonce: None,
750         };
751
752         let update_fee = msgs::UpdateFee {
753                 channel_id: chan.2,
754                 feerate_per_kw: non_buffer_feerate + 4,
755         };
756
757         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_fee);
758
759         // While producing the commitment_signed response after handling a received update_fee request,
760         // the check that the funder (who sent the update_fee request) can afford the new fee
761         // (funder_balance >= fee + channel_reserve) should produce an error.
762         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
763         nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
764         check_added_monitors!(nodes[1], 1);
765         check_closed_broadcast!(nodes[1], true);
766         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") });
767 }
768
769 #[test]
770 fn test_update_fee_with_fundee_update_add_htlc() {
771         let chanmon_cfgs = create_chanmon_cfgs(2);
772         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
773         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
774         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
775         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
776
777         // balancing
778         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
779
780         {
781                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
782                 *feerate_lock += 20;
783         }
784         nodes[0].node.timer_tick_occurred();
785         check_added_monitors!(nodes[0], 1);
786
787         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
788         assert_eq!(events_0.len(), 1);
789         let (update_msg, commitment_signed) = match events_0[0] {
790                 MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
791                         (update_fee.as_ref(), commitment_signed)
792                 },
793                 _ => panic!("Unexpected event"),
794         };
795         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
796         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
797         let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
798         check_added_monitors!(nodes[1], 1);
799
800         let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 800000);
801
802         // nothing happens since node[1] is in AwaitingRemoteRevoke
803         nodes[1].node.send_payment_with_route(&route, our_payment_hash,
804                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
805         {
806                 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
807                 assert_eq!(added_monitors.len(), 0);
808                 added_monitors.clear();
809         }
810         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
811         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
812         // node[1] has nothing to do
813
814         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
815         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
816         check_added_monitors!(nodes[0], 1);
817
818         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
819         let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
820         // No commitment_signed so get_event_msg's assert(len == 1) passes
821         check_added_monitors!(nodes[0], 1);
822         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
823         check_added_monitors!(nodes[1], 1);
824         // AwaitingRemoteRevoke ends here
825
826         let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
827         assert_eq!(commitment_update.update_add_htlcs.len(), 1);
828         assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0);
829         assert_eq!(commitment_update.update_fail_htlcs.len(), 0);
830         assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0);
831         assert_eq!(commitment_update.update_fee.is_none(), true);
832
833         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]);
834         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
835         check_added_monitors!(nodes[0], 1);
836         let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
837
838         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke);
839         check_added_monitors!(nodes[1], 1);
840         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
841
842         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
843         check_added_monitors!(nodes[1], 1);
844         let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
845         // No commitment_signed so get_event_msg's assert(len == 1) passes
846
847         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke);
848         check_added_monitors!(nodes[0], 1);
849         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
850
851         expect_pending_htlcs_forwardable!(nodes[0]);
852
853         let events = nodes[0].node.get_and_clear_pending_events();
854         assert_eq!(events.len(), 1);
855         match events[0] {
856                 Event::PaymentClaimable { .. } => { },
857                 _ => panic!("Unexpected event"),
858         };
859
860         claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage);
861
862         send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
863         send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
864         close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
865         check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
866         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
867 }
868
869 #[test]
870 fn test_update_fee() {
871         let chanmon_cfgs = create_chanmon_cfgs(2);
872         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
873         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
874         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
875         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
876         let channel_id = chan.2;
877
878         // A                                        B
879         // (1) update_fee/commitment_signed      ->
880         //                                       <- (2) revoke_and_ack
881         //                                       .- send (3) commitment_signed
882         // (4) update_fee/commitment_signed      ->
883         //                                       .- send (5) revoke_and_ack (no CS as we're awaiting a revoke)
884         //                                       <- (3) commitment_signed delivered
885         // send (6) revoke_and_ack               -.
886         //                                       <- (5) deliver revoke_and_ack
887         // (6) deliver revoke_and_ack            ->
888         //                                       .- send (7) commitment_signed in response to (4)
889         //                                       <- (7) deliver commitment_signed
890         // revoke_and_ack                        ->
891
892         // Create and deliver (1)...
893         let feerate;
894         {
895                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
896                 feerate = *feerate_lock;
897                 *feerate_lock = feerate + 20;
898         }
899         nodes[0].node.timer_tick_occurred();
900         check_added_monitors!(nodes[0], 1);
901
902         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
903         assert_eq!(events_0.len(), 1);
904         let (update_msg, commitment_signed) = match events_0[0] {
905                 MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
906                         (update_fee.as_ref(), commitment_signed)
907                 },
908                 _ => panic!("Unexpected event"),
909         };
910         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
911
912         // Generate (2) and (3):
913         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
914         let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
915         check_added_monitors!(nodes[1], 1);
916
917         // Deliver (2):
918         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
919         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
920         check_added_monitors!(nodes[0], 1);
921
922         // Create and deliver (4)...
923         {
924                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
925                 *feerate_lock = feerate + 30;
926         }
927         nodes[0].node.timer_tick_occurred();
928         check_added_monitors!(nodes[0], 1);
929         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
930         assert_eq!(events_0.len(), 1);
931         let (update_msg, commitment_signed) = match events_0[0] {
932                 MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
933                         (update_fee.as_ref(), commitment_signed)
934                 },
935                 _ => panic!("Unexpected event"),
936         };
937
938         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
939         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
940         check_added_monitors!(nodes[1], 1);
941         // ... creating (5)
942         let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
943         // No commitment_signed so get_event_msg's assert(len == 1) passes
944
945         // Handle (3), creating (6):
946         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0);
947         check_added_monitors!(nodes[0], 1);
948         let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
949         // No commitment_signed so get_event_msg's assert(len == 1) passes
950
951         // Deliver (5):
952         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
953         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
954         check_added_monitors!(nodes[0], 1);
955
956         // Deliver (6), creating (7):
957         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0);
958         let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
959         assert!(commitment_update.update_add_htlcs.is_empty());
960         assert!(commitment_update.update_fulfill_htlcs.is_empty());
961         assert!(commitment_update.update_fail_htlcs.is_empty());
962         assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
963         assert!(commitment_update.update_fee.is_none());
964         check_added_monitors!(nodes[1], 1);
965
966         // Deliver (7)
967         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
968         check_added_monitors!(nodes[0], 1);
969         let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
970         // No commitment_signed so get_event_msg's assert(len == 1) passes
971
972         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
973         check_added_monitors!(nodes[1], 1);
974         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
975
976         assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30);
977         assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30);
978         close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
979         check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
980         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
981 }
982
983 #[test]
984 fn fake_network_test() {
985         // Simple test which builds a network of ChannelManagers, connects them to each other, and
986         // tests that payments get routed and transactions broadcast in semi-reasonable ways.
987         let chanmon_cfgs = create_chanmon_cfgs(4);
988         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
989         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
990         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
991
992         // Create some initial channels
993         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
994         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
995         let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
996
997         // Rebalance the network a bit by relaying one payment through all the channels...
998         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
999         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1000         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1001         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1002
1003         // Send some more payments
1004         send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000);
1005         send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000);
1006         send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000);
1007
1008         // Test failure packets
1009         let payment_hash_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 1000000).1;
1010         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1);
1011
1012         // Add a new channel that skips 3
1013         let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3);
1014
1015         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
1016         send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
1017         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1018         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1019         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1020         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1021         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1022
1023         // Do some rebalance loop payments, simultaneously
1024         let mut hops = Vec::with_capacity(3);
1025         hops.push(RouteHop {
1026                 pubkey: nodes[2].node.get_our_node_id(),
1027                 node_features: NodeFeatures::empty(),
1028                 short_channel_id: chan_2.0.contents.short_channel_id,
1029                 channel_features: ChannelFeatures::empty(),
1030                 fee_msat: 0,
1031                 cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32
1032         });
1033         hops.push(RouteHop {
1034                 pubkey: nodes[3].node.get_our_node_id(),
1035                 node_features: NodeFeatures::empty(),
1036                 short_channel_id: chan_3.0.contents.short_channel_id,
1037                 channel_features: ChannelFeatures::empty(),
1038                 fee_msat: 0,
1039                 cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32
1040         });
1041         hops.push(RouteHop {
1042                 pubkey: nodes[1].node.get_our_node_id(),
1043                 node_features: nodes[1].node.node_features(),
1044                 short_channel_id: chan_4.0.contents.short_channel_id,
1045                 channel_features: nodes[1].node.channel_features(),
1046                 fee_msat: 1000000,
1047                 cltv_expiry_delta: TEST_FINAL_CLTV,
1048         });
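        // Back-fill each intermediate hop's fee from the amount forwarded to the next hop:
        // fee = fee_base_msat + fee_proportional_millionths * forwarded_amount / 1_000_000,
        // working backwards so each hop's forwarded amount is already known.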
1049         hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
1050         hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
1051         let payment_preimage_1 = send_along_route(&nodes[1], Route { paths: vec![Path { hops, blinded_tail: None }], payment_params: None }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
1052
1053         let mut hops = Vec::with_capacity(3);
1054         hops.push(RouteHop {
1055                 pubkey: nodes[3].node.get_our_node_id(),
1056                 node_features: NodeFeatures::empty(),
1057                 short_channel_id: chan_4.0.contents.short_channel_id,
1058                 channel_features: ChannelFeatures::empty(),
1059                 fee_msat: 0,
1060                 cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32
1061         });
1062         hops.push(RouteHop {
1063                 pubkey: nodes[2].node.get_our_node_id(),
1064                 node_features: NodeFeatures::empty(),
1065                 short_channel_id: chan_3.0.contents.short_channel_id,
1066                 channel_features: ChannelFeatures::empty(),
1067                 fee_msat: 0,
1068                 cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32
1069         });
1070         hops.push(RouteHop {
1071                 pubkey: nodes[1].node.get_our_node_id(),
1072                 node_features: nodes[1].node.node_features(),
1073                 short_channel_id: chan_2.0.contents.short_channel_id,
1074                 channel_features: nodes[1].node.channel_features(),
1075                 fee_msat: 1000000,
1076                 cltv_expiry_delta: TEST_FINAL_CLTV,
1077         });
1078         hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
1079         hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
1080         let payment_hash_2 = send_along_route(&nodes[1], Route { paths: vec![Path { hops, blinded_tail: None }], payment_params: None }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;
1081
1082         // Claim the rebalances...
1083         fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
1084         claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
1085
1086         // Close down the channels...
1087         close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
1088         check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
1089         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
1090         close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
1091         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
1092         check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
1093         close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
1094         check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
1095         check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
1096         close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
1097         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
1098         check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
1099 }
1100
1101 #[test]
1102 fn holding_cell_htlc_counting() {
1103         // Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs
1104         // to ensure we don't end up with HTLCs sitting around in our holding cell for several
1105         // commitment dance rounds.
1106         let chanmon_cfgs = create_chanmon_cfgs(3);
1107         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1108         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1109         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1110         create_announced_chan_between_nodes(&nodes, 0, 1);
1111         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
1112
1113         // Fetch a route in advance, as we won't be able to get one once we're unable to send.
1114         let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
1115
1116         let mut payments = Vec::new();
1117         for _ in 0..50 {
1118                 let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
1119                 nodes[1].node.send_payment_with_route(&route, payment_hash,
1120                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
1121                 payments.push((payment_preimage, payment_hash));
1122         }
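        // Only the first send should have produced a monitor update and an outbound message;
        // the remaining HTLCs sit in the holding cell until nodes[2]'s revoke_and_ack arrives.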
1123         check_added_monitors!(nodes[1], 1);
1124
1125         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
1126         assert_eq!(events.len(), 1);
1127         let initial_payment_event = SendEvent::from_event(events.pop().unwrap());
1128         assert_eq!(initial_payment_event.node_id, nodes[2].node.get_our_node_id());
1129
1130         // There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs in
1131         // the holding cell waiting on B's RAA to send. At this point we should not be able to add
1132         // another HTLC.
1133         {
1134                 unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, payment_hash_1,
1135                                 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
1136                         ), true, APIError::ChannelUnavailable { .. }, {});
1137                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1138         }
1139
1140         // This should also be true if we try to forward a payment.
1141         let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
1142         {
1143                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1144                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1145                 check_added_monitors!(nodes[0], 1);
1146         }
1147
1148         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1149         assert_eq!(events.len(), 1);
1150         let payment_event = SendEvent::from_event(events.pop().unwrap());
1151         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1152
1153         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1154         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
1155         // We have to forward pending HTLCs twice - the first pass tries to forward the payment (and
1156         // fails), then the second processes the resulting failure and fails the HTLC backwards.
1157         expect_pending_htlcs_forwardable!(nodes[1]);
1158         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
1159         check_added_monitors!(nodes[1], 1);
1160
1161         let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1162         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]);
1163         commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true);
1164
1165         expect_payment_failed_with_update!(nodes[0], payment_hash_2, false, chan_2.0.contents.short_channel_id, false);
1166
1167         // Now forward all the pending HTLCs and claim them back
1168         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &initial_payment_event.msgs[0]);
1169         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &initial_payment_event.commitment_msg);
1170         check_added_monitors!(nodes[2], 1);
1171
1172         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1173         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
1174         check_added_monitors!(nodes[1], 1);
1175         let as_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1176
1177         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
1178         check_added_monitors!(nodes[1], 1);
1179         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1180
1181         for ref update in as_updates.update_add_htlcs.iter() {
1182                 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update);
1183         }
1184         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_updates.commitment_signed);
1185         check_added_monitors!(nodes[2], 1);
1186         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
1187         check_added_monitors!(nodes[2], 1);
1188         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1189
1190         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
1191         check_added_monitors!(nodes[1], 1);
1192         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
1193         check_added_monitors!(nodes[1], 1);
1194         let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1195
1196         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_final_raa);
1197         check_added_monitors!(nodes[2], 1);
1198
1199         expect_pending_htlcs_forwardable!(nodes[2]);
1200
1201         let events = nodes[2].node.get_and_clear_pending_events();
1202         assert_eq!(events.len(), payments.len());
1203         for (event, &(_, ref hash)) in events.iter().zip(payments.iter()) {
1204                 match event {
1205                         &Event::PaymentClaimable { ref payment_hash, .. } => {
1206                                 assert_eq!(*payment_hash, *hash);
1207                         },
1208                         _ => panic!("Unexpected event"),
1209                 };
1210         }
1211
1212         for (preimage, _) in payments.drain(..) {
1213                 claim_payment(&nodes[1], &[&nodes[2]], preimage);
1214         }
1215
1216         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
1217 }
1218
1219 #[test]
1220 fn duplicate_htlc_test() {
1221         // Test that we accept duplicate payment_hash HTLCs across the network and that
1222         // claiming/failing each of them is handled separately and doesn't affect the others
1223         let chanmon_cfgs = create_chanmon_cfgs(6);
1224         let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
1225         let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
1226         let mut nodes = create_network(6, &node_cfgs, &node_chanmgrs);
1227
1228         // Create some initial channels to route via 3 to 4/5 from 0/1/2
1229         create_announced_chan_between_nodes(&nodes, 0, 3);
1230         create_announced_chan_between_nodes(&nodes, 1, 3);
1231         create_announced_chan_between_nodes(&nodes, 2, 3);
1232         create_announced_chan_between_nodes(&nodes, 3, 4);
1233         create_announced_chan_between_nodes(&nodes, 3, 5);
1234
1235         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
1236
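        // The functional test utils derive payment preimages from this shared counter, so
        // decrementing it makes the next route_payment reuse the same preimage/payment_hash.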
1237         *nodes[0].network_payment_count.borrow_mut() -= 1;
1238         assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage);
1239
1240         *nodes[0].network_payment_count.borrow_mut() -= 1;
1241         assert_eq!(route_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], 1000000).0, payment_preimage);
1242
1243         claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage);
1244         fail_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], payment_hash);
1245         claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage);
1246 }
1247
1248 #[test]
1249 fn test_duplicate_htlc_different_direction_onchain() {
1250         // Test that ChannelMonitor doesn't generate 2 preimage txn
1251         // when we have 2 HTLCs with same preimage that go across a node
1252         // in opposite directions, even with the same payment secret.
1253         let chanmon_cfgs = create_chanmon_cfgs(2);
1254         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1255         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1256         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1257
1258         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1259
1260         // balancing
1261         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
1262
1263         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);
1264
1265         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], 800_000);
1266         let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
1267         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], 800_000, payment_hash, node_a_payment_secret);
1268
1269         // Provide preimage to node 0 by claiming payment
1270         nodes[0].node.claim_funds(payment_preimage);
1271         expect_payment_claimed!(nodes[0], payment_hash, 800_000);
1272         check_added_monitors!(nodes[0], 1);
1273
1274         // Broadcast node 1 commitment txn
1275         let remote_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
1276
1277         assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound
1278         let mut has_both_htlcs = 0; // check htlcs match ones committed
1279         for outp in remote_txn[0].output.iter() {
1280                 if outp.value == 800_000 / 1000 {
1281                         has_both_htlcs += 1;
1282                 } else if outp.value == 900_000 / 1000 {
1283                         has_both_htlcs += 1;
1284                 }
1285         }
1286         assert_eq!(has_both_htlcs, 2);
1287
1288         mine_transaction(&nodes[0], &remote_txn[0]);
1289         check_added_monitors!(nodes[0], 1);
1290         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
1291         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
1292
1293         let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
1294         assert_eq!(claim_txn.len(), 3);
1295
1296         check_spends!(claim_txn[0], remote_txn[0]); // Immediate HTLC claim with preimage
1297         check_spends!(claim_txn[1], remote_txn[0]);
1298         check_spends!(claim_txn[2], remote_txn[0]);
1299         let preimage_tx = &claim_txn[0];
1300         let (preimage_bump_tx, timeout_tx) = if claim_txn[1].input[0].previous_output == preimage_tx.input[0].previous_output {
1301                 (&claim_txn[1], &claim_txn[2])
1302         } else {
1303                 (&claim_txn[2], &claim_txn[1])
1304         };
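        // The monitor claims the HTLC it has the preimage for immediately, later rebroadcasts a
        // fee-bumped version of that claim, and claims the other HTLC via the timeout path once
        // it expires - hence three claim transactions, all spending the remote commitment.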
1305
1306         assert_eq!(preimage_tx.input.len(), 1);
1307         assert_eq!(preimage_bump_tx.input.len(), 1);
1308
1310         assert_eq!(preimage_tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx
1311         assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value, 800);
1312
1313         assert_eq!(timeout_tx.input.len(), 1);
1314         assert_eq!(timeout_tx.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx
1315         check_spends!(timeout_tx, remote_txn[0]);
1316         assert_eq!(remote_txn[0].output[timeout_tx.input[0].previous_output.vout as usize].value, 900);
1317
1318         let events = nodes[0].node.get_and_clear_pending_msg_events();
1319         assert_eq!(events.len(), 3);
1320         for e in events {
1321                 match e {
1322                         MessageSendEvent::BroadcastChannelUpdate { .. } => {},
1323                         MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
1324                                 assert_eq!(node_id, nodes[1].node.get_our_node_id());
1325                                 assert_eq!(msg.data, "Channel closed because commitment or closing transaction was confirmed on chain.");
1326                         },
1327                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
1328                                 assert!(update_add_htlcs.is_empty());
1329                                 assert!(update_fail_htlcs.is_empty());
1330                                 assert_eq!(update_fulfill_htlcs.len(), 1);
1331                                 assert!(update_fail_malformed_htlcs.is_empty());
1332                                 assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
1333                         },
1334                         _ => panic!("Unexpected event"),
1335                 }
1336         }
1337 }
1338
1339 #[test]
1340 fn test_basic_channel_reserve() {
1341         let chanmon_cfgs = create_chanmon_cfgs(2);
1342         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1343         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1344         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1345         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1346
1347         let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
1348         let channel_reserve = chan_stat.channel_reserve_msat;
1349
1350         // The 2* and +1 are for the fee spike reserve.
1351         let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, &get_channel_type_features!(nodes[0], nodes[1], chan.2));
1352         let max_can_send = 5000000 - channel_reserve - commit_tx_fee;
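        // max_can_send is our whole balance (100_000 sats funded minus 95_000_000 msat pushed,
        // i.e. 5_000_000 msat) less the reserve and the fee-spike-buffered commitment fee, so
        // bumping the route by a single msat below must trip the reserve check.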
1353         let (mut route, our_payment_hash, _, our_payment_secret) =
1354                 get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
1355         route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
1356         let err = nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1357                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).err().unwrap();
1358         match err {
1359                 PaymentSendFailure::AllFailedResendSafe(ref fails) => {
1360                         if let &APIError::ChannelUnavailable { .. } = &fails[0] {}
1361                         else { panic!("Unexpected error variant"); }
1362                 },
1363                 _ => panic!("Unexpected error variant"),
1364         }
1365         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1366
1367         send_payment(&nodes[0], &vec![&nodes[1]], max_can_send);
1368 }
1369
1370 #[test]
1371 fn test_fee_spike_violation_fails_htlc() {
1372         let chanmon_cfgs = create_chanmon_cfgs(2);
1373         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1374         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1375         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1376         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1377
1378         let (mut route, payment_hash, _, payment_secret) =
1379                 get_route_and_payment_hash!(nodes[0], nodes[1], 3460000);
1380         route.paths[0].hops[0].fee_msat += 1;
1381         // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1382         let secp_ctx = Secp256k1::new();
1383         let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!");
1384
1385         let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
1386
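        // Derive the per-hop shared secrets from the session key, build the per-hop onion
        // payloads, then wrap them into the onion packet used in the hand-rolled
        // update_add_htlc below.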
1387         let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
1388         let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
1389                 3460001, RecipientOnionFields::secret_only(payment_secret), cur_height, &None).unwrap();
1390         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
1391         let msg = msgs::UpdateAddHTLC {
1392                 channel_id: chan.2,
1393                 htlc_id: 0,
1394                 amount_msat: htlc_msat,
1395                 payment_hash: payment_hash,
1396                 cltv_expiry: htlc_cltv,
1397                 onion_routing_packet: onion_packet,
1398                 skimmed_fee_msat: None,
1399         };
1400
1401         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
1402
1403         // Now manually create the commitment_signed message corresponding to the update_add
1404         // nodes[0] just sent. In the code for construction of this message, "local" refers
1405         // to the sender of the message, and "remote" refers to the receiver.
1406
1407         let feerate_per_kw = get_feerate!(nodes[0], nodes[1], chan.2);
1408
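        // Commitment numbers count down from 2^48 - 1 (BOLT 3's 48-bit commitment number), so
        // INITIAL_COMMITMENT_NUMBER - 1 below refers to the channel's second commitment.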
1409         const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
1410
1411         // Get the EnforcingSigner for each channel, which will be used to (1) get the keys
1412         // needed to sign the new commitment tx and (2) sign the new commitment tx.
1413         let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = {
1414                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
1415                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
1416                 let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
1417                 let chan_signer = local_chan.get_signer();
1418                 // Make the signer believe we validated another commitment, so we can release the secret
1419                 chan_signer.get_enforcement_state().last_holder_commitment -= 1;
1420
1421                 let pubkeys = chan_signer.pubkeys();
1422                 (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
1423                  chan_signer.release_commitment_secret(INITIAL_COMMITMENT_NUMBER),
1424                  chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx),
1425                  chan_signer.pubkeys().funding_pubkey)
1426         };
1427         let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
1428                 let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
1429                 let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
1430                 let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
1431                 let chan_signer = remote_chan.get_signer();
1432                 let pubkeys = chan_signer.pubkeys();
1433                 (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
1434                  chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
1435                  chan_signer.pubkeys().funding_pubkey)
1436         };
1437
1438         // Assemble the set of keys we can use for signatures for our commitment_signed message.
1439         let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
1440                 &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);
1441
1442         // Build the remote commitment transaction so we can sign it, and then later use the
1443         // signature for the commitment_signed message.
1444         let local_chan_balance = 1313;
1445
1446         let accepted_htlc_info = chan_utils::HTLCOutputInCommitment {
1447                 offered: false,
1448                 amount_msat: 3460001,
1449                 cltv_expiry: htlc_cltv,
1450                 payment_hash,
1451                 transaction_output_index: Some(1),
1452         };
1453
1454         let commitment_number = INITIAL_COMMITMENT_NUMBER - 1;
1455
1456         let res = {
1457                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
1458                 let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
1459                 let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap();
1460                 let local_chan_signer = local_chan.get_signer();
1461                 let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
1462                         commitment_number,
1463                         95000,
1464                         local_chan_balance,
1465                         local_funding, remote_funding,
1466                         commit_tx_keys.clone(),
1467                         feerate_per_kw,
1468                         &mut vec![(accepted_htlc_info, ())],
1469                         &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
1470                 );
1471                 local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
1472         };
1473
1474         let commit_signed_msg = msgs::CommitmentSigned {
1475                 channel_id: chan.2,
1476                 signature: res.0,
1477                 htlc_signatures: res.1,
1478                 #[cfg(taproot)]
1479                 partial_signature_with_nonce: None,
1480         };
1481
1482         // Send the commitment_signed message to nodes[1].
1483         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
1484         let _ = nodes[1].node.get_and_clear_pending_msg_events();
1485
1486         // Send the RAA to nodes[1].
1487         let raa_msg = msgs::RevokeAndACK {
1488                 channel_id: chan.2,
1489                 per_commitment_secret: local_secret,
1490                 next_per_commitment_point: next_local_point,
1491                 #[cfg(taproot)]
1492                 next_local_nonce: None,
1493         };
1494         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_msg);
1495
1496         let events = nodes[1].node.get_and_clear_pending_msg_events();
1497         assert_eq!(events.len(), 1);
1498         // Make sure the HTLC failed in the way we expect.
1499         match events[0] {
1500                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, .. } => {
1501                         assert_eq!(update_fail_htlcs.len(), 1);
1502                         update_fail_htlcs[0].clone()
1503                 },
1504                 _ => panic!("Unexpected event"),
1505         };
1506         nodes[1].logger.assert_log("lightning::ln::channel".to_string(),
1507                 format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", ::hex::encode(raa_msg.channel_id)), 1);
1508
1509         check_added_monitors!(nodes[1], 2);
1510 }
1511
1512 #[test]
1513 fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
1514         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1515         // Set the balances such that, at the current feerate, the fundee sending any
1516         // above-dust amount would result in a channel reserve violation.
1517         // In this test we check that we would be prevented from sending an HTLC in
1518         // this situation.
1519         let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1520         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1521         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1522         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1523         let default_config = UserConfig::default();
1524         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1525
1526         let mut push_amt = 100_000_000;
1527         push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1528
1529         push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
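        // nodes[0] (the funder, who pays the commitment fee) is left with exactly its reserve
        // plus the fee for MIN_AFFORDABLE_HTLC_COUNT non-dust HTLCs, so once that many HTLCs are
        // outstanding, any further above-dust HTLC from nodes[1] would dip nodes[0] below reserve.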
1530
1531         let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);
1532
1533         // Fetch a route in advance, as we won't be able to get one once we're unable to send.
1534         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000);
1535         // Sending exactly enough to hit the reserve amount should be accepted
1536         for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
1537                 let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1538         }
1539
1540         // However one more HTLC should be significantly over the reserve amount and fail.
1541         unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
1542                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1543                 ), true, APIError::ChannelUnavailable { .. }, {});
1544         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1545 }
1546
1547 #[test]
1548 fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
1549         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1550         let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1551         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1552         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1553         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1554         let default_config = UserConfig::default();
1555         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1556
1557         // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
1558         // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
1559         // transaction fee with 0 HTLCs (183 sats)).
1560         let mut push_amt = 100_000_000;
1561         push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1562         push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1563         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);
1564
1565         // Send four HTLCs to cover the initial push_msat buffer we're required to include
1566         for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
1567                 let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1568         }
1569
1570         let (mut route, payment_hash, _, payment_secret) =
1571                 get_route_and_payment_hash!(nodes[1], nodes[0], 1000);
1572         route.paths[0].hops[0].fee_msat = 700_000;
1573         // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1574         let secp_ctx = Secp256k1::new();
1575         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
1576         let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
1577         let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
1578         let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
1579                 700_000, RecipientOnionFields::secret_only(payment_secret), cur_height, &None).unwrap();
1580         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
1581         let msg = msgs::UpdateAddHTLC {
1582                 channel_id: chan.2,
1583                 htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64,
1584                 amount_msat: htlc_msat,
1585                 payment_hash: payment_hash,
1586                 cltv_expiry: htlc_cltv,
1587                 onion_routing_packet: onion_packet,
1588                 skimmed_fee_msat: None,
1589         };
1590
1591         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
1592         // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
1593         nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
1594         assert_eq!(nodes[0].node.list_channels().len(), 0);
1595         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
1596         assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
1597         check_added_monitors!(nodes[0], 1);
1598         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() });
1599 }
1600
1601 #[test]
1602 fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
1603         // Test that if we receive many dust HTLCs over an outbound channel, they don't count when
1604         // calculating our commitment transaction fee (this was previously broken).
1605         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1606         let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1607
1608         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1609         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1610         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1611         let default_config = UserConfig::default();
1612         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1613
1614         // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
1615         // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
1616         // transaction fee with 0 HTLCs (183 sats)).
1617         let mut push_amt = 100_000_000;
1618         push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1619         push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1620         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt);
1621
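        // An HTLC received by nodes[0] only gets its own commitment output (and thus counts
        // towards the commit tx fee) if its value minus the HTLC-success transaction fee reaches
        // the dust limit, so the largest "dust" HTLC is roughly dust_limit + success-tx-fee - 1 msat.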
1622         let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000
1623                 + feerate_per_kw as u64 * htlc_success_tx_weight(&channel_type_features) / 1000 * 1000 - 1;
1624         // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
1625         // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
1626         // commitment transaction fee.
1627         let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], dust_amt);
1628
1629         // Send four HTLCs to cover the initial push_msat buffer we're required to include
1630         for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
1631                 let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1632         }
1633
1634         // One more than the dust amt should fail, however.
1635         let (mut route, our_payment_hash, _, our_payment_secret) =
1636                 get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt);
1637         route.paths[0].hops[0].fee_msat += 1;
1638         unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
1639                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1640                 ), true, APIError::ChannelUnavailable { .. }, {});
1641 }
1642
1643 #[test]
1644 fn test_chan_init_feerate_unaffordability() {
1645         // Test that we will reject channel opens which do not leave enough to pay for any HTLCs due to
1646         // channel reserve and feerate requirements.
1647         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1648         let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1649         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1650         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1651         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1652         let default_config = UserConfig::default();
1653         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1654
1655         // Set the push_msat amount such that nodes[0] will not be able to afford to add even a single
1656         // HTLC.
1657         let mut push_amt = 100_000_000;
1658         push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1659         assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None).unwrap_err(),
1660                 APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() });
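        // (At the test-default feerate of 253 sat/kW, the initial commitment with
        // MIN_AFFORDABLE_HTLC_COUNT HTLC slots costs 357 sats, while pushing one extra msat
        // leaves the funder with only 356 sats - hence the amounts in the error above.)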
1661
1662         // During open, we don't have a "counterparty channel reserve" to check against, so that
1663         // requirement only comes into play on the open_channel handling side.
1664         push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1665         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None).unwrap();
1666         let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
1667         open_channel_msg.push_msat += 1;
1668         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
1669
1670         let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
1671         assert_eq!(msg_events.len(), 1);
1672         match msg_events[0] {
1673                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
1674                         assert_eq!(msg.data, "Insufficient funding amount for initial reserve");
1675                 },
1676                 _ => panic!("Unexpected event"),
1677         }
1678 }
1679
1680 #[test]
1681 fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
1682         // Test that if we receive many dust HTLCs over an inbound channel, they don't count when
1683         // calculating our counterparty's commitment transaction fee (this was previously broken).
1684         let chanmon_cfgs = create_chanmon_cfgs(2);
1685         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1686         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1687         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1688         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000);
1689
1690         let payment_amt = 46000; // Dust amount
1691         // In the previous code, these first four payments would succeed.
1692         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1693         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1694         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1695         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1696
1697         // Then these next 5 would be interpreted by nodes[1] as violating the fee spike buffer.
1698         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1699         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1700         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1701         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1702         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1703
1704         // And this last payment previously resulted in nodes[1] closing on its inbound-channel
1705         // counterparty, because it counted all the previous dust HTLCs against nodes[0]'s commitment
1706         // transaction fee and therefore perceived this next payment as a channel reserve violation.
1707         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1708 }
1709
1710 #[test]
1711 fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
1712         let chanmon_cfgs = create_chanmon_cfgs(3);
1713         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1714         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1715         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1716         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1717         let _ = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);
1718
1719         let feemsat = 239;
1720         let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
1721         let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
1722         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
1723         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
1724
1725         // The 2* and +1 are for the fee spike reserve.
1726         let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
1727         let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc)/2;
1728         let amt_msat_1 = recv_value_1 + total_routing_fee_msat;
1729
1730         // Add a pending HTLC.
1731         let (route_1, our_payment_hash_1, _, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_1);
1732         let payment_event_1 = {
1733                 nodes[0].node.send_payment_with_route(&route_1, our_payment_hash_1,
1734                         RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
1735                 check_added_monitors!(nodes[0], 1);
1736
1737                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1738                 assert_eq!(events.len(), 1);
1739                 SendEvent::from_event(events.remove(0))
1740         };
1741         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
1742
1743         // Attempt to trigger a channel reserve violation --> payment failure.
1744         let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, &channel_type_features);
1745         let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1;
1746         let amt_msat_2 = recv_value_2 + total_routing_fee_msat;
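        // recv_value_2 is one msat more than nodes[0] could still send without dipping below the
        // reserve, so the hand-crafted update_add below must trip nodes[1]'s reserve check.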
1747         let mut route_2 = route_1.clone();
1748         route_2.paths[0].hops.last_mut().unwrap().fee_msat = amt_msat_2;
1749
1750         // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1751         let secp_ctx = Secp256k1::new();
1752         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
1753         let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
1754         let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap();
1755         let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
1756                 &route_2.paths[0], recv_value_2, RecipientOnionFields::spontaneous_empty(), cur_height, &None).unwrap();
1757         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1).unwrap();
1758         let msg = msgs::UpdateAddHTLC {
1759                 channel_id: chan.2,
1760                 htlc_id: 1,
1761                 amount_msat: htlc_msat + 1,
1762                 payment_hash: our_payment_hash_1,
1763                 cltv_expiry: htlc_cltv,
1764                 onion_routing_packet: onion_packet,
1765                 skimmed_fee_msat: None,
1766         };
1767
1768         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
1769         // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
1770         nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote HTLC add would put them under remote reserve value".to_string(), 1);
1771         assert_eq!(nodes[1].node.list_channels().len(), 1);
1772         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
1773         assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
1774         check_added_monitors!(nodes[1], 1);
1775         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() });
1776 }
1777
1778 #[test]
1779 fn test_inbound_outbound_capacity_is_not_zero() {
1780         let chanmon_cfgs = create_chanmon_cfgs(2);
1781         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1782         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1783         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1784         let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1785         let channels0 = node_chanmgrs[0].list_channels();
1786         let channels1 = node_chanmgrs[1].list_channels();
1787         let default_config = UserConfig::default();
1788         assert_eq!(channels0.len(), 1);
1789         assert_eq!(channels1.len(), 1);
1790
1791         let reserve = get_holder_selected_channel_reserve_satoshis(100_000, &default_config);
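        // Each side's capacity towards its counterparty is its own balance minus the reserve the
        // counterparty requires it to maintain - with the default config that reserve is 1% of
        // the channel value, i.e. 1_000 sats here.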
1792         assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve*1000);
1793         assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve*1000);
1794
1795         assert_eq!(channels0[0].outbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
1796         assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
1797 }
1798
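// Matches the BOLT 3 commitment weight formula (724 weight plus 172 per non-dust HTLC for
// non-anchor channels), rounded down to a whole satoshi. E.g. at the test-default 253 sat/kW
// with one HTLC: (724 + 172) * 253 / 1000 = 226 sats, i.e. 226_000 msat.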
1799 fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, channel_type_features: &ChannelTypeFeatures) -> u64 {
1800         (commitment_tx_base_weight(channel_type_features) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
1801 }
1802
1803 #[test]
1804 fn test_channel_reserve_holding_cell_htlcs() {
1805         let chanmon_cfgs = create_chanmon_cfgs(3);
1806         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1807         // When this test was written, the default base fee floated based on the HTLC count.
1808         // It is now fixed, so we simply set the fee to the expected value here.
1809         let mut config = test_default_channel_config();
1810         config.channel_config.forwarding_fee_base_msat = 239;
1811         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
1812         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1813         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001);
1814         let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 190000, 1001);
1815
1816         let mut stat01 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
1817         let mut stat11 = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
1818
1819         let mut stat12 = get_channel_value_stat!(nodes[1], nodes[2], chan_2.2);
1820         let mut stat22 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2);
1821
1822         macro_rules! expect_forward {
1823                 ($node: expr) => {{
1824                         let mut events = $node.node.get_and_clear_pending_msg_events();
1825                         assert_eq!(events.len(), 1);
1826                         check_added_monitors!($node, 1);
1827                         let payment_event = SendEvent::from_event(events.remove(0));
1828                         payment_event
1829                 }}
1830         }
1831
1832         let feemsat = 239; // set above
1833         let total_fee_msat = (nodes.len() - 2) as u64 * feemsat;
1834         let feerate = get_feerate!(nodes[0], nodes[1], chan_1.2);
1835         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_1.2);
1836
1837         let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat;
1838
1839         // attempt to send amt_msat > their_max_htlc_value_in_flight_msat
1840         {
1841                 let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
1842                         .with_bolt11_features(nodes[2].node.invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
1843                 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0);
1844                 route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
1845                 assert!(route.paths[0].hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
1846
1847                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1848                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1849                         ), true, APIError::ChannelUnavailable { .. }, {});
1850                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1851         }
1852
1853         // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete
1854         // nodes[0]'s wealth
1855         loop {
1856                 let amt_msat = recv_value_0 + total_fee_msat;
1857                 // 3 for the 3 HTLCs that will be sent; the 2* and +1 account for the fee spike reserve.
1858                 // Also, ensure that each payment has enough to be over the dust limit to
1859                 // ensure it'll be included in each commit tx fee calculation.
1860                 let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
1861                 let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000);
1862                 if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat {
1863                         break;
1864                 }
1865
1866                 let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
1867                         .with_bolt11_features(nodes[2].node.invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
1868                 let route = get_route!(nodes[0], payment_params, recv_value_0).unwrap();
1869                 let (payment_preimage, ..) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0);
1870                 claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
1871
1872                 let (stat01_, stat11_, stat12_, stat22_) = (
1873                         get_channel_value_stat!(nodes[0], nodes[1], chan_1.2),
1874                         get_channel_value_stat!(nodes[1], nodes[0], chan_1.2),
1875                         get_channel_value_stat!(nodes[1], nodes[2], chan_2.2),
1876                         get_channel_value_stat!(nodes[2], nodes[1], chan_2.2),
1877                 );
1878
1879                 assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
1880                 assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
1881                 assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
1882                 assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
1883                 stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
1884         }
1885
1886         // Now start adding pending outputs.
1887         // The 2* and +1 HTLCs on the commit tx fee account for the fee spike reserve.
1888         // The reason we're dividing by two here is as follows: the dividend is the total outbound liquidity
1889         // after fees, the channel reserve, and the fee spike buffer are removed. We eventually want to
1890         // divide this quantity into 3 portions, that will each be sent in an HTLC. This allows us
1891         // to test the channel reserve policy at the edges of what amount is sendable, i.e.
1892         // cases where 1 msat over X amount will cause a payment failure, but anything less than
1893         // that can be sent successfully. So, dividing by two is a somewhat arbitrary way of getting
1894         // the amount of the first of these aforementioned 3 payments. The reason we split into 3 payments
1895         // is to test the behavior of the holding cell with respect to channel reserve and commit tx fee
1896         // policy.
1897         let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
1898         let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2;
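        // Illustrative sketch only (the numbers are hypothetical, nothing below asserts them): if
        // value_to_self were 10_000_000 msat, the reserve 1_000_000 msat, total_fee_msat 239 and
        // commit_tx_fee_2_htlcs 300_000 msat, then recv_value_1 would be
        // (10_000_000 - 1_000_000 - 239 - 300_000) / 2 = 4_349_880 msat (integer division).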
1899         let amt_msat_1 = recv_value_1 + total_fee_msat;
1900
1901         let (route_1, our_payment_hash_1, our_payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_1);
1902         let payment_event_1 = {
1903                 nodes[0].node.send_payment_with_route(&route_1, our_payment_hash_1,
1904                         RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
1905                 check_added_monitors!(nodes[0], 1);
1906
1907                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1908                 assert_eq!(events.len(), 1);
1909                 SendEvent::from_event(events.remove(0))
1910         };
1911         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
1912
1913         // channel reserve test with htlc pending output > 0
1914         let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs;
1915         {
1916                 let mut route = route_1.clone();
1917                 route.paths[0].hops.last_mut().unwrap().fee_msat = recv_value_2 + 1;
1918                 let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
1919                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1920                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1921                         ), true, APIError::ChannelUnavailable { .. }, {});
1922                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1923         }
1924
1925         // split the rest to test holding cell
1926         let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
1927         let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs;
1928         let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2;
1929         let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat;
1930         {
1931                 let stat = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
1932                 assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat + commit_tx_fee_3_htlcs), stat.channel_reserve_msat);
1933         }
1934
1935         // now see if they go through on both sides
1936         let (route_21, our_payment_hash_21, our_payment_preimage_21, our_payment_secret_21) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_21);
1937         // but this one will get stuck in the holding cell
1938         nodes[0].node.send_payment_with_route(&route_21, our_payment_hash_21,
1939                 RecipientOnionFields::secret_only(our_payment_secret_21), PaymentId(our_payment_hash_21.0)).unwrap();
1940         check_added_monitors!(nodes[0], 0);
1941         let events = nodes[0].node.get_and_clear_pending_events();
1942         assert_eq!(events.len(), 0);
1943
1944         // test with outbound holding cell amount > 0
1945         {
1946                 let (mut route, our_payment_hash, _, our_payment_secret) =
1947                         get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
1948                 route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
1949                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1950                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1951                         ), true, APIError::ChannelUnavailable { .. }, {});
1952                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1953         }
1954
1955         let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
1956         // this one will also get stuck in the holding cell
1957         nodes[0].node.send_payment_with_route(&route_22, our_payment_hash_22,
1958                 RecipientOnionFields::secret_only(our_payment_secret_22), PaymentId(our_payment_hash_22.0)).unwrap();
1959         check_added_monitors!(nodes[0], 0);
1960         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
1961         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1962
1963         // flush the pending htlc
1964         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg);
1965         let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1966         check_added_monitors!(nodes[1], 1);
1967
1968         // the pending htlc should be promoted to committed
1969         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
1970         check_added_monitors!(nodes[0], 1);
1971         let commitment_update_2 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1972
1973         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed);
1974         let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1975         // No commitment_signed so get_event_msg's assert(len == 1) passes
1976         check_added_monitors!(nodes[0], 1);
1977
1978         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack);
1979         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1980         check_added_monitors!(nodes[1], 1);
1981
1982         expect_pending_htlcs_forwardable!(nodes[1]);
1983
1984         let ref payment_event_11 = expect_forward!(nodes[1]);
1985         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]);
1986         commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
1987
1988         expect_pending_htlcs_forwardable!(nodes[2]);
1989         expect_payment_claimable!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1);
1990
1991         // flush the htlcs in the holding cell
1992         assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
1993         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]);
1994         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]);
1995         commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
1996         expect_pending_htlcs_forwardable!(nodes[1]);
1997
1998         let ref payment_event_3 = expect_forward!(nodes[1]);
1999         assert_eq!(payment_event_3.msgs.len(), 2);
2000         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]);
2001         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]);
2002
2003         commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
2004         expect_pending_htlcs_forwardable!(nodes[2]);
2005
2006         let events = nodes[2].node.get_and_clear_pending_events();
2007         assert_eq!(events.len(), 2);
2008         match events[0] {
2009                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
2010                         assert_eq!(our_payment_hash_21, *payment_hash);
2011                         assert_eq!(recv_value_21, amount_msat);
2012                         assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
2013                         assert_eq!(via_channel_id, Some(chan_2.2));
2014                         match &purpose {
2015                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
2016                                         assert!(payment_preimage.is_none());
2017                                         assert_eq!(our_payment_secret_21, *payment_secret);
2018                                 },
2019                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
2020                         }
2021                 },
2022                 _ => panic!("Unexpected event"),
2023         }
2024         match events[1] {
2025                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
2026                         assert_eq!(our_payment_hash_22, *payment_hash);
2027                         assert_eq!(recv_value_22, amount_msat);
2028                         assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
2029                         assert_eq!(via_channel_id, Some(chan_2.2));
2030                         match &purpose {
2031                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
2032                                         assert!(payment_preimage.is_none());
2033                                         assert_eq!(our_payment_secret_22, *payment_secret);
2034                                 },
2035                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
2036                         }
2037                 },
2038                 _ => panic!("Unexpected event"),
2039         }
2040
2041         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
2042         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
2043         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
2044
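        // This last payment is sized so that, once it settles, nodes[0] is left holding exactly its
        // channel reserve plus the commit tx fee for one more (fee-spike-buffered) HTLC, which the
        // assertions on stat0 below check.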
2045         let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, &channel_type_features);
2046         let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat;
2047         send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3);
2048
2049         let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
2050         let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat);
2051         let stat0 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
2052         assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
2053         assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat + commit_tx_fee_1_htlc);
2054
2055         let stat2 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2);
2056         assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3);
2057 }
2058
2059 #[test]
2060 fn channel_reserve_in_flight_removes() {
2061         // In cases where one side claims an HTLC, it thinks it has additional available funds that it
2062         // can send to its counterparty, but due to update ordering, the other side may not yet have
2063         // considered those HTLCs fully removed.
2064         // This tests that we don't count HTLCs which will not be included in the next remote
2065         // commitment transaction towards the reserve value (as it implies no commitment transaction
2066         // will be generated which violates the remote reserve value).
2067         // This was broken previously, and discovered by the chanmon_fail_consistency fuzz test.
2068         // To test this we:
2069         //  * route two HTLCs from A to B (note that, at a high level, this test is checking that, when
2070         //    you consider the values of both of these HTLCs, B may not send an HTLC back to A, but if
2071         //    you only consider the value of the first HTLC, it may),
2072         //  * start routing a third HTLC from A to B,
2073         //  * claim the first two HTLCs (though B will generate an update_fulfill for one, and put
2074         //    the other claim in its holding cell, as it immediately goes into AwaitingRAA),
2075         //  * deliver the first fulfill from B
2076         //  * deliver the update_add and an RAA from A, resulting in B freeing the second holding cell
2077         //    claim,
2078         //  * deliver A's response CS and RAA.
2079         //    This results in A having the second HTLC in AwaitingRemovedRemoteRevoke, but B having
2080         //    removed it fully. B now has the push_msat plus the first two HTLCs in value.
2081         //  * Now B happily sends another HTLC, potentially violating its reserve value from A's point
2082         //    of view (if A counts the AwaitingRemovedRemoteRevoke HTLC).
2083         let chanmon_cfgs = create_chanmon_cfgs(2);
2084         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2085         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2086         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2087         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2088
2089         let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
2090         // Route the first two HTLCs.
2091         let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000;
2092         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1);
2093         let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 20_000);
2094
2095         // Start routing the third HTLC (this is just used to get everyone in the right state).
2096         let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
2097         let send_1 = {
2098                 nodes[0].node.send_payment_with_route(&route, payment_hash_3,
2099                         RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
2100                 check_added_monitors!(nodes[0], 1);
2101                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2102                 assert_eq!(events.len(), 1);
2103                 SendEvent::from_event(events.remove(0))
2104         };
2105
2106         // Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an
2107         // initial fulfill/CS.
2108         nodes[1].node.claim_funds(payment_preimage_1);
2109         expect_payment_claimed!(nodes[1], payment_hash_1, payment_value_1);
2110         check_added_monitors!(nodes[1], 1);
2111         let bs_removes = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2112
2113         // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not
2114         // remove the second HTLC when we send the HTLC back from B to A.
2115         nodes[1].node.claim_funds(payment_preimage_2);
2116         expect_payment_claimed!(nodes[1], payment_hash_2, 20_000);
2117         check_added_monitors!(nodes[1], 1);
2118         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2119
2120         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]);
2121         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed);
2122         check_added_monitors!(nodes[0], 1);
2123         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2124         expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
2125
2126         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]);
2127         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg);
2128         check_added_monitors!(nodes[1], 1);
2129         // B is already AwaitingRAA, so it can't generate a CS here
2130         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2131
2132         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2133         check_added_monitors!(nodes[1], 1);
2134         let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2135
2136         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2137         check_added_monitors!(nodes[0], 1);
2138         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2139
2140         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2141         check_added_monitors!(nodes[1], 1);
2142         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2143
2144         // The second HTLC is removed, but as A is in AwaitingRAA it can't generate a CS here, so the
2145         // RAA that B generated above doesn't fully resolve the second HTLC from A's point of view.
2146         // However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
2147         // can no longer broadcast a commitment transaction with it and B has the preimage so can go
2148         // on-chain as necessary).
2149         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]);
2150         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
2151         check_added_monitors!(nodes[0], 1);
2152         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2153         expect_payment_sent_without_paths!(nodes[0], payment_preimage_2);
2154
2155         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2156         check_added_monitors!(nodes[1], 1);
2157         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2158
2159         expect_pending_htlcs_forwardable!(nodes[1]);
2160         expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 100000);
2161
2162         // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
2163         // resolve the second HTLC from A's point of view.
2164         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2165         check_added_monitors!(nodes[0], 1);
2166         expect_payment_path_successful!(nodes[0]);
2167         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2168
2169         // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
2170         // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing.
2171         let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[1], nodes[0], 10000);
2172         let send_2 = {
2173                 nodes[1].node.send_payment_with_route(&route, payment_hash_4,
2174                         RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap();
2175                 check_added_monitors!(nodes[1], 1);
2176                 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
2177                 assert_eq!(events.len(), 1);
2178                 SendEvent::from_event(events.remove(0))
2179         };
2180
2181         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_2.msgs[0]);
2182         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_2.commitment_msg);
2183         check_added_monitors!(nodes[0], 1);
2184         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2185
2186         // Now just resolve all the outstanding messages/HTLCs for completeness...
2187
2188         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2189         check_added_monitors!(nodes[1], 1);
2190         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2191
2192         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2193         check_added_monitors!(nodes[1], 1);
2194
2195         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2196         check_added_monitors!(nodes[0], 1);
2197         expect_payment_path_successful!(nodes[0]);
2198         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2199
2200         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2201         check_added_monitors!(nodes[1], 1);
2202         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2203
2204         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2205         check_added_monitors!(nodes[0], 1);
2206
2207         expect_pending_htlcs_forwardable!(nodes[0]);
2208         expect_payment_claimable!(nodes[0], payment_hash_4, payment_secret_4, 10000);
2209
2210         claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
2211         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
2212 }
2213
2214 #[test]
2215 fn channel_monitor_network_test() {
2216         // Simple test which builds a network of ChannelManagers, connects them to each other, and
2217         // tests that ChannelMonitor is able to recover from various states.
2218         let chanmon_cfgs = create_chanmon_cfgs(5);
2219         let node_cfgs = create_node_cfgs(5, &chanmon_cfgs);
2220         let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, None, None, None, None]);
2221         let nodes = create_network(5, &node_cfgs, &node_chanmgrs);
2222
2223         // Create some initial channels
2224         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2225         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
2226         let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
2227         let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
2228
2229         // Make sure all nodes are at the same starting height
2230         connect_blocks(&nodes[0], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
2231         connect_blocks(&nodes[1], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
2232         connect_blocks(&nodes[2], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
2233         connect_blocks(&nodes[3], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[3].best_block_info().1);
2234         connect_blocks(&nodes[4], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[4].best_block_info().1);
2235
2236         // Rebalance the network a bit by relaying one payment through all the channels...
2237         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2238         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2239         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2240         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2241
2242         // Simple case with no pending HTLCs:
2243         nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
2244         check_added_monitors!(nodes[1], 1);
2245         check_closed_broadcast!(nodes[1], true);
2246         {
2247                 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
2248                 assert_eq!(node_txn.len(), 1);
2249                 mine_transaction(&nodes[0], &node_txn[0]);
2250                 check_added_monitors!(nodes[0], 1);
2251                 test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE);
2252         }
2253         check_closed_broadcast!(nodes[0], true);
2254         assert_eq!(nodes[0].node.list_channels().len(), 0);
2255         assert_eq!(nodes[1].node.list_channels().len(), 1);
2256         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
2257         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
2258
2259         // One pending HTLC is discarded by the force-close:
2260         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
2261
2262         // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
2263         // broadcast until we reach the timelock time).
2264         nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
2265         check_closed_broadcast!(nodes[1], true);
2266         check_added_monitors!(nodes[1], 1);
2267         {
2268                 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE);
2269                 connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
2270                 test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
2271                 mine_transaction(&nodes[2], &node_txn[0]);
2272                 check_added_monitors!(nodes[2], 1);
2273                 test_txn_broadcast(&nodes[2], &chan_2, Some(node_txn[0].clone()), HTLCType::NONE);
2274         }
2275         check_closed_broadcast!(nodes[2], true);
2276         assert_eq!(nodes[1].node.list_channels().len(), 0);
2277         assert_eq!(nodes[2].node.list_channels().len(), 1);
2278         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
2279         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
2280
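        // Helper (descriptive note): has $node claim $preimage for $payment_hash and checks that the
        // resulting update_fulfill is addressed to $prev_node. The message is deliberately never
        // delivered here; the claim only teaches $node the preimage before things go on-chain.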
2281         macro_rules! claim_funds {
2282                 ($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => {
2283                         {
2284                                 $node.node.claim_funds($preimage);
2285                                 expect_payment_claimed!($node, $payment_hash, 3_000_000);
2286                                 check_added_monitors!($node, 1);
2287
2288                                 let events = $node.node.get_and_clear_pending_msg_events();
2289                                 assert_eq!(events.len(), 1);
2290                                 match events[0] {
2291                                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => {
2292                                                 assert!(update_add_htlcs.is_empty());
2293                                                 assert!(update_fail_htlcs.is_empty());
2294                                                 assert_eq!(*node_id, $prev_node.node.get_our_node_id());
2295                                         },
2296                                         _ => panic!("Unexpected event"),
2297                                 };
2298                         }
2299                 }
2300         }
2301
2302         // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
2303         // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
2304         nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
2305         check_added_monitors!(nodes[2], 1);
2306         check_closed_broadcast!(nodes[2], true);
2307         let node2_commitment_txid;
2308         {
2309                 let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
2310                 connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
2311                 test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
2312                 node2_commitment_txid = node_txn[0].txid();
2313
2314                 // Claim the payment on nodes[3], giving it knowledge of the preimage
2315                 claim_funds!(nodes[3], nodes[2], payment_preimage_1, payment_hash_1);
2316                 mine_transaction(&nodes[3], &node_txn[0]);
2317                 check_added_monitors!(nodes[3], 1);
2318                 check_preimage_claim(&nodes[3], &node_txn);
2319         }
2320         check_closed_broadcast!(nodes[3], true);
2321         assert_eq!(nodes[2].node.list_channels().len(), 0);
2322         assert_eq!(nodes[3].node.list_channels().len(), 1);
2323         check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
2324         check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
2325
2326         // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
2327         // confusing us in the following tests.
2328         let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&OutPoint { txid: chan_3.3.txid(), index: 0 });
2329
2330         // One pending HTLC to time out:
2331         let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[3], &[&nodes[4]], 3_000_000);
2332         // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
2333         // buffer space).
2334
2335         let (close_chan_update_1, close_chan_update_2) = {
2336                 connect_blocks(&nodes[3], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
2337                 let events = nodes[3].node.get_and_clear_pending_msg_events();
2338                 assert_eq!(events.len(), 2);
2339                 let close_chan_update_1 = match events[0] {
2340                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2341                                 msg.clone()
2342                         },
2343                         _ => panic!("Unexpected event"),
2344                 };
2345                 match events[1] {
2346                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id } => {
2347                                 assert_eq!(node_id, nodes[4].node.get_our_node_id());
2348                         },
2349                         _ => panic!("Unexpected event"),
2350                 }
2351                 check_added_monitors!(nodes[3], 1);
2352
2353                 // Clear bumped claiming txn spending node 2 commitment tx. Bumped txn are generated after reaching some height timer.
2354                 {
2355                         let mut node_txn = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap();
2356                         node_txn.retain(|tx| {
2357                                 if tx.input[0].previous_output.txid == node2_commitment_txid {
2358                                         false
2359                                 } else { true }
2360                         });
2361                 }
2362
2363                 let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT);
2364
2365                 // Claim the payment on nodes[4], giving it knowledge of the preimage
2366                 claim_funds!(nodes[4], nodes[3], payment_preimage_2, payment_hash_2);
2367
2368                 connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
2369                 let events = nodes[4].node.get_and_clear_pending_msg_events();
2370                 assert_eq!(events.len(), 2);
2371                 let close_chan_update_2 = match events[0] {
2372                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2373                                 msg.clone()
2374                         },
2375                         _ => panic!("Unexpected event"),
2376                 };
2377                 match events[1] {
2378                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id } => {
2379                                 assert_eq!(node_id, nodes[3].node.get_our_node_id());
2380                         },
2381                         _ => panic!("Unexpected event"),
2382                 }
2383                 check_added_monitors!(nodes[4], 1);
2384                 test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
2385
2386                 mine_transaction(&nodes[4], &node_txn[0]);
2387                 check_preimage_claim(&nodes[4], &node_txn);
2388                 (close_chan_update_1, close_chan_update_2)
2389         };
2390         nodes[3].gossip_sync.handle_channel_update(&close_chan_update_2).unwrap();
2391         nodes[4].gossip_sync.handle_channel_update(&close_chan_update_1).unwrap();
2392         assert_eq!(nodes[3].node.list_channels().len(), 0);
2393         assert_eq!(nodes[4].node.list_channels().len(), 0);
2394
2395         assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
2396                 ChannelMonitorUpdateStatus::Completed);
2397         check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
2398         check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed);
2399 }
2400
2401 #[test]
2402 fn test_justice_tx_htlc_timeout() {
2403         // Test justice txn built on revoked HTLC-Timeout tx, against both sides
2404         let mut alice_config = UserConfig::default();
2405         alice_config.channel_handshake_config.announced_channel = true;
2406         alice_config.channel_handshake_limits.force_announced_channel_preference = false;
2407         alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
2408         let mut bob_config = UserConfig::default();
2409         bob_config.channel_handshake_config.announced_channel = true;
2410         bob_config.channel_handshake_limits.force_announced_channel_preference = false;
2411         bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
2412         let user_cfgs = [Some(alice_config), Some(bob_config)];
2413         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2414         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2415         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
2416         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2417         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
2418         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2419         // Create some new channels:
2420         let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1);
2421
2422         // A pending HTLC which will be revoked:
2423         let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2424         // Get the will-be-revoked local txn from nodes[0]
2425         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_5.2);
2426         assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
2427         assert_eq!(revoked_local_txn[0].input.len(), 1);
2428         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
2429         assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to 0 are present
2430         assert_eq!(revoked_local_txn[1].input.len(), 1);
2431         assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2432         assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
2433         // Revoke the old state
2434         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
2435
2436         {
2437                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2438                 {
2439                         let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
2440                         assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2441                         assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output
2442                         check_spends!(node_txn[0], revoked_local_txn[0]);
2443                         node_txn.swap_remove(0);
2444                 }
2445                 check_added_monitors!(nodes[1], 1);
2446                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
2447                 test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
2448
2449                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2450                 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
2451                 // Verify broadcast of revoked HTLC-timeout
2452                 let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
2453                 check_added_monitors!(nodes[0], 1);
2454                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
2455                 // Broadcast revoked HTLC-timeout on node 1
2456                 mine_transaction(&nodes[1], &node_txn[1]);
2457                 test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
2458         }
2459         get_announce_close_broadcast_events(&nodes, 0, 1);
2460         assert_eq!(nodes[0].node.list_channels().len(), 0);
2461         assert_eq!(nodes[1].node.list_channels().len(), 0);
2462 }
2463
2464 #[test]
2465 fn test_justice_tx_htlc_success() {
2466         // Test justice txn built on revoked HTLC-Success tx, against both sides
2467         let mut alice_config = UserConfig::default();
2468         alice_config.channel_handshake_config.announced_channel = true;
2469         alice_config.channel_handshake_limits.force_announced_channel_preference = false;
2470         alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
2471         let mut bob_config = UserConfig::default();
2472         bob_config.channel_handshake_config.announced_channel = true;
2473         bob_config.channel_handshake_limits.force_announced_channel_preference = false;
2474         bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
2475         let user_cfgs = [Some(alice_config), Some(bob_config)];
2476         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2477         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2478         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
2479         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2480         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
2481         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2482         // Create some new channels:
2483         let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1);
2484
2485         // A pending HTLC which will be revoked:
2486         let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2487         // Get the will-be-revoked local txn from B
2488         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_6.2);
2489         assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx
2490         assert_eq!(revoked_local_txn[0].input.len(), 1);
2491         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.txid());
2492         assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to A are present
2493         // Revoke the old state
2494         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4);
2495         {
2496                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2497                 {
2498                         let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
2499                         assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2500                         assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output
2501
2502                         check_spends!(node_txn[0], revoked_local_txn[0]);
2503                         node_txn.swap_remove(0);
2504                 }
2505                 check_added_monitors!(nodes[0], 1);
2506                 test_txn_broadcast(&nodes[0], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
2507
2508                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2509                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
2510                 let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
2511                 check_added_monitors!(nodes[1], 1);
2512                 mine_transaction(&nodes[0], &node_txn[1]);
2513                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
2514                 test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
2515         }
2516         get_announce_close_broadcast_events(&nodes, 0, 1);
2517         assert_eq!(nodes[0].node.list_channels().len(), 0);
2518         assert_eq!(nodes[1].node.list_channels().len(), 0);
2519 }
2520
2521 #[test]
2522 fn revoked_output_claim() {
2523         // Simple test to ensure a node will claim a revoked output when a stale remote commitment
2524         // transaction is broadcast by its counterparty
2525         let chanmon_cfgs = create_chanmon_cfgs(2);
2526         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2527         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2528         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2529         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2530         // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim the revoked output
2531         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2532         assert_eq!(revoked_local_txn.len(), 1);
2533         // Only output is the full channel value back to nodes[0]:
2534         assert_eq!(revoked_local_txn[0].output.len(), 1);
2535         // Send a payment through, updating everyone's latest commitment txn
2536         send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);
2537
2538         // Inform nodes[1] that nodes[0] broadcast a stale tx
2539         mine_transaction(&nodes[1], &revoked_local_txn[0]);
2540         check_added_monitors!(nodes[1], 1);
2541         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
2542         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
2543         assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output
2544
2545         check_spends!(node_txn[0], revoked_local_txn[0]);
2546
2547         // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
2548         mine_transaction(&nodes[0], &revoked_local_txn[0]);
2549         get_announce_close_broadcast_events(&nodes, 0, 1);
2550         check_added_monitors!(nodes[0], 1);
2551         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
2552 }
2553
2554 #[test]
2555 fn claim_htlc_outputs_shared_tx() {
2556         // Node revoked its old state; the HTLCs haven't timed out yet, so claim them in a shared justice tx
2557         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2558         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2559         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2560         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2561         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2562
2563         // Create some new channel:
2564         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2565
2566         // Rebalance the network to generate HTLCs in both directions
2567         send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
2568         // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx
2569         let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
2570         let (_payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
2571
2572         // Get the will-be-revoked local txn from node[0]
2573         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2574         assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
2575         assert_eq!(revoked_local_txn[0].input.len(), 1);
2576         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
2577         assert_eq!(revoked_local_txn[1].input.len(), 1);
2578         assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2579         assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
2580         check_spends!(revoked_local_txn[1], revoked_local_txn[0]);
2581
2582         //Revoke the old state
2583         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
2584
2585         {
2586                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2587                 check_added_monitors!(nodes[0], 1);
2588                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
2589                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2590                 check_added_monitors!(nodes[1], 1);
2591                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
2592                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2593                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
2594
2595                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
2596                 assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2597
2598                 assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
2599                 check_spends!(node_txn[0], revoked_local_txn[0]);
2600
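                // Descriptive note: the last witness element on each input is the witness script
                // being satisfied, so its length distinguishes which output type is claimed
                // (revoked to_local vs. offered vs. received HTLC), as checked below.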
2601                 let mut witness_lens = BTreeSet::new();
2602                 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
2603                 witness_lens.insert(node_txn[0].input[1].witness.last().unwrap().len());
2604                 witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len());
2605                 assert_eq!(witness_lens.len(), 3);
2606                 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2607                 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2608                 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2609
2610                 // Finally, mine the penalty transaction and check that we get an HTLC failure after
2611                 // ANTI_REORG_DELAY confirmations.
2612                 mine_transaction(&nodes[1], &node_txn[0]);
2613                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2614                 expect_payment_failed!(nodes[1], payment_hash_2, false);
2615         }
2616         get_announce_close_broadcast_events(&nodes, 0, 1);
2617         assert_eq!(nodes[0].node.list_channels().len(), 0);
2618         assert_eq!(nodes[1].node.list_channels().len(), 0);
2619 }
2620
2621 #[test]
2622 fn claim_htlc_outputs_single_tx() {
2623         // Node revoked its old state; the HTLCs have timed out, so claim each of them in a separate justice tx
2624         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2625         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2626         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2627         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2628         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2629
2630         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2631
2632         // Rebalance the network to generate HTLCs in both directions
2633         send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
2634         // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx, but this
2635         // time as two different claim transactions, as we're going to time out the HTLCs given a high current height
2636         let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
2637         let (_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
2638
2639         // Get the will-be-revoked local txn from node[0]
2640         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2641
2642         //Revoke the old state
2643         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
2644
2645         {
2646                 confirm_transaction_at(&nodes[0], &revoked_local_txn[0], 100);
2647                 check_added_monitors!(nodes[0], 1);
2648                 confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
2649                 check_added_monitors!(nodes[1], 1);
2650                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
2651                 let mut events = nodes[0].node.get_and_clear_pending_events();
2652                 expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
2653                 match events.last().unwrap() {
2654                         Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
2655                         _ => panic!("Unexpected event"),
2656                 }
2657
2658                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2659                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
2660
2661                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcast();
2662
2663                 // Check the pair of local commitment and HTLC-timeout txn broadcast due to HTLC expiration
2664                 assert_eq!(node_txn[0].input.len(), 1);
2665                 check_spends!(node_txn[0], chan_1.3);
2666                 assert_eq!(node_txn[1].input.len(), 1);
2667                 let witness_script = node_txn[1].input[0].witness.last().unwrap();
2668                 assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); //Spending an offered htlc output
2669                 check_spends!(node_txn[1], node_txn[0]);
2670
2671                 // Filter out any non-justice transactions.
2672                 node_txn.retain(|tx| tx.input[0].previous_output.txid == revoked_local_txn[0].txid());
2673                 assert!(node_txn.len() > 3);
2674
2675                 assert_eq!(node_txn[0].input.len(), 1);
2676                 assert_eq!(node_txn[1].input.len(), 1);
2677                 assert_eq!(node_txn[2].input.len(), 1);
2678
2679                 check_spends!(node_txn[0], revoked_local_txn[0]);
2680                 check_spends!(node_txn[1], revoked_local_txn[0]);
2681                 check_spends!(node_txn[2], revoked_local_txn[0]);
2682
2683                 let mut witness_lens = BTreeSet::new();
2684                 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
2685                 witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len());
2686                 witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
2687                 assert_eq!(witness_lens.len(), 3);
2688                 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2689                 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2690                 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2691
2692                 // Finally, mine the penalty transactions and check that we get an HTLC failure after
2693                 // ANTI_REORG_DELAY confirmations.
2694                 mine_transaction(&nodes[1], &node_txn[0]);
2695                 mine_transaction(&nodes[1], &node_txn[1]);
2696                 mine_transaction(&nodes[1], &node_txn[2]);
2697                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2698                 expect_payment_failed!(nodes[1], payment_hash_2, false);
2699         }
2700         get_announce_close_broadcast_events(&nodes, 0, 1);
2701         assert_eq!(nodes[0].node.list_channels().len(), 0);
2702         assert_eq!(nodes[1].node.list_channels().len(), 0);
2703 }
2704
2705 #[test]
2706 fn test_htlc_on_chain_success() {
2707         // Test that in case of a unilateral close onchain, we detect the state of the outputs and pass
2708         // the preimage backward accordingly. So here we test that ChannelManager is
2709         // broadcasting the right events to other nodes in the payment path.
2710         // We test with two HTLCs simultaneously as that was not handled correctly in the past.
2711         // A --------------------> B ----------------------> C (preimage)
2712         // First, C should claim the HTLC outputs via HTLC-Success when its own latest local
2713         // commitment transaction was broadcast.
2714         // Then, B should learn the preimage from said transactions, attempting to claim backwards
2715         // towards A.
2716         // B should be able to claim via preimage if A then broadcasts its local tx.
2717         // Finally, when A sees B's latest local commitment transaction it should be able to claim
2718         // the HTLC outputs via the preimage it learned (which, once confirmed should generate a
2719         // PaymentSent event).
2720
2721         let chanmon_cfgs = create_chanmon_cfgs(3);
2722         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2723         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2724         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2725
2726         // Create some initial channels
2727         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2728         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
2729
2730         // Ensure all nodes are at the same height
2731         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
2732         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
2733         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
2734         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
2735
2736         // Rebalance the network a bit by relaying one payment through all the channels...
2737         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2738         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2739
2740         let (our_payment_preimage, payment_hash_1, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
2741         let (our_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
2742
2743         // Broadcast legit commitment tx from C on B's chain
2744         // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain
2745         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
2746         assert_eq!(commitment_tx.len(), 1);
2747         check_spends!(commitment_tx[0], chan_2.3);
2748         nodes[2].node.claim_funds(our_payment_preimage);
2749         expect_payment_claimed!(nodes[2], payment_hash_1, 3_000_000);
2750         nodes[2].node.claim_funds(our_payment_preimage_2);
2751         expect_payment_claimed!(nodes[2], payment_hash_2, 3_000_000);
2752         check_added_monitors!(nodes[2], 2);
2753         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2754         assert!(updates.update_add_htlcs.is_empty());
2755         assert!(updates.update_fail_htlcs.is_empty());
2756         assert!(updates.update_fail_malformed_htlcs.is_empty());
2757         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2758
2759         mine_transaction(&nodes[2], &commitment_tx[0]);
2760         check_closed_broadcast!(nodes[2], true);
2761         check_added_monitors!(nodes[2], 1);
2762         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
2763         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 2 (2 * HTLC-Success tx)
2764         assert_eq!(node_txn.len(), 2);
2765         check_spends!(node_txn[0], commitment_tx[0]);
2766         check_spends!(node_txn[1], commitment_tx[0]);
2767         assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2768         assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2769         assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2770         assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2771         assert_eq!(node_txn[0].lock_time.0, 0);
2772         assert_eq!(node_txn[1].lock_time.0, 0);
2773
2774         // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
2775         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()]));
2776         connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
2777         {
2778                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2779                 assert_eq!(added_monitors.len(), 1);
2780                 assert_eq!(added_monitors[0].0.txid, chan_2.3.txid());
2781                 added_monitors.clear();
2782         }
2783         let forwarded_events = nodes[1].node.get_and_clear_pending_events();
2784         assert_eq!(forwarded_events.len(), 3);
2785         match forwarded_events[0] {
2786                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
2787                 _ => panic!("Unexpected event"),
2788         }
2789         let chan_id = Some(chan_1.2);
2790         match forwarded_events[1] {
2791                 Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
2792                         assert_eq!(fee_earned_msat, Some(1000));
2793                         assert_eq!(prev_channel_id, chan_id);
2794                         assert_eq!(claim_from_onchain_tx, true);
2795                         assert_eq!(next_channel_id, Some(chan_2.2));
2796                         assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
2797                 },
2798                 _ => panic!()
2799         }
2800         match forwarded_events[2] {
2801                 Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
2802                         assert_eq!(fee_earned_msat, Some(1000));
2803                         assert_eq!(prev_channel_id, chan_id);
2804                         assert_eq!(claim_from_onchain_tx, true);
2805                         assert_eq!(next_channel_id, Some(chan_2.2));
2806                         assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
2807                 },
2808                 _ => panic!()
2809         }
2810         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
2811         {
2812                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2813                 assert_eq!(added_monitors.len(), 2);
2814                 assert_eq!(added_monitors[0].0.txid, chan_1.3.txid());
2815                 assert_eq!(added_monitors[1].0.txid, chan_1.3.txid());
2816                 added_monitors.clear();
2817         }
2818         assert_eq!(events.len(), 3);
2819
2820         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
2821         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
2822
2823         match nodes_2_event {
2824                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
2825                 _ => panic!("Unexpected event"),
2826         }
2827
2828         match nodes_0_event {
2829                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
2830                         assert!(update_add_htlcs.is_empty());
2831                         assert!(update_fail_htlcs.is_empty());
2832                         assert_eq!(update_fulfill_htlcs.len(), 1);
2833                         assert!(update_fail_malformed_htlcs.is_empty());
2834                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
2835                 },
2836                 _ => panic!("Unexpected event"),
2837         };
2838
2839         // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
2840         match events[0] {
2841                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
2842                 _ => panic!("Unexpected event"),
2843         }
2844
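             // Helper: asserts that $node broadcast exactly two timeout claims spending $commitment_tx, using
             // the witness script length and output type to distinguish offered vs. received HTLC spends.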
2845         macro_rules! check_tx_local_broadcast {
2846                 ($node: expr, $htlc_offered: expr, $commitment_tx: expr) => { {
2847                         let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap();
2848                         assert_eq!(node_txn.len(), 2);
2849                         // Node[1]: 2 * HTLC-timeout tx
2850                         // Node[0]: 2 * HTLC-timeout tx
2851                         check_spends!(node_txn[0], $commitment_tx);
2852                         check_spends!(node_txn[1], $commitment_tx);
2853                         assert_ne!(node_txn[0].lock_time.0, 0);
2854                         assert_ne!(node_txn[1].lock_time.0, 0);
2855                         if $htlc_offered {
2856                                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2857                                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2858                                 assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2859                                 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2860                         } else {
2861                                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2862                                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2863                                 assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
2864                                 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
2865                         }
2866                         node_txn.clear();
2867                 } }
2868         }
2869         // nodes[1] now broadcasts its own timeout-claim of the output that nodes[2] just claimed via success.
2870         check_tx_local_broadcast!(nodes[1], false, commitment_tx[0]);
2871
2872         // Broadcast legit commitment tx from A on B's chain
2873         // Broadcast preimage tx by B on offered output from A's commitment tx on A's chain
2874         let node_a_commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
2875         check_spends!(node_a_commitment_tx[0], chan_1.3);
2876         mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
2877         check_closed_broadcast!(nodes[1], true);
2878         check_added_monitors!(nodes[1], 1);
2879         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
2880         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
2881         assert!(node_txn.len() == 1 || node_txn.len() == 3); // HTLC-Success, 2* RBF bumps of above HTLC txn
2882         let commitment_spend =
2883                 if node_txn.len() == 1 {
2884                         &node_txn[0]
2885                 } else {
2886                         // Certain `ConnectStyle`s will cause RBF bumps of the previous HTLC transaction to be broadcast.
2887                         // FullBlockViaListen
2888                         if node_txn[0].input[0].previous_output.txid == node_a_commitment_tx[0].txid() {
2889                                 check_spends!(node_txn[1], commitment_tx[0]);
2890                                 check_spends!(node_txn[2], commitment_tx[0]);
2891                                 assert_ne!(node_txn[1].input[0].previous_output.vout, node_txn[2].input[0].previous_output.vout);
2892                                 &node_txn[0]
2893                         } else {
2894                                 check_spends!(node_txn[0], commitment_tx[0]);
2895                                 check_spends!(node_txn[1], commitment_tx[0]);
2896                                 assert_ne!(node_txn[0].input[0].previous_output.vout, node_txn[1].input[0].previous_output.vout);
2897                                 &node_txn[2]
2898                         }
2899                 };
2900
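             // B's claim spends both offered HTLC outputs of A's commitment transaction in a single preimage claim.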
2901         check_spends!(commitment_spend, node_a_commitment_tx[0]);
2902         assert_eq!(commitment_spend.input.len(), 2);
2903         assert_eq!(commitment_spend.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2904         assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2905         assert_eq!(commitment_spend.lock_time.0, nodes[1].best_block_info().1);
2906         assert!(commitment_spend.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
2907         // We don't bother to check that B can claim the HTLC output on its commitment tx here as
2908         // we already checked the same situation with A.
2909
2910         // Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
2911         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()]));
2912         connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
2913         check_closed_broadcast!(nodes[0], true);
2914         check_added_monitors!(nodes[0], 1);
2915         let events = nodes[0].node.get_and_clear_pending_events();
2916         assert_eq!(events.len(), 5);
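             // The five events are two PaymentSent, two PaymentPathSuccessful, and one ChannelClosed, in no particular order.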
2917         let mut first_claimed = false;
2918         for event in events {
2919                 match event {
2920                         Event::PaymentSent { payment_preimage, payment_hash, .. } => {
2921                                 if payment_preimage == our_payment_preimage && payment_hash == payment_hash_1 {
2922                                         assert!(!first_claimed);
2923                                         first_claimed = true;
2924                                 } else {
2925                                         assert_eq!(payment_preimage, our_payment_preimage_2);
2926                                         assert_eq!(payment_hash, payment_hash_2);
2927                                 }
2928                         },
2929                         Event::PaymentPathSuccessful { .. } => {},
2930                         Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
2931                         _ => panic!("Unexpected event"),
2932                 }
2933         }
2934         check_tx_local_broadcast!(nodes[0], true, node_a_commitment_tx[0]);
2935 }
2936
2937 fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
2938         // Test that in case of a unilateral close onchain, we detect the state of the outputs and
2939         // time out the HTLC backward accordingly. So here we test that ChannelManager is
2940         // broadcasting the right events to other nodes in the payment path.
2941         // A ------------------> B ----------------------> C (timeout)
2942         //    B's commitment tx                 C's commitment tx
2943         //            \                                  \
2944         //         B's HTLC timeout tx               B's timeout tx
2945
2946         let chanmon_cfgs = create_chanmon_cfgs(3);
2947         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2948         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2949         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2950         *nodes[0].connect_style.borrow_mut() = connect_style;
2951         *nodes[1].connect_style.borrow_mut() = connect_style;
2952         *nodes[2].connect_style.borrow_mut() = connect_style;
2953
2954         // Create some initial channels
2955         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2956         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
2957
2958         // Rebalance the network a bit by relaying one payment through all the channels...
2959         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2960         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2961
2962         let (_payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
2963
2964         // Broadcast legit commitment tx from C on B's chain
2965         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
2966         check_spends!(commitment_tx[0], chan_2.3);
2967         nodes[2].node.fail_htlc_backwards(&payment_hash);
2968         check_added_monitors!(nodes[2], 0);
2969         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
2970         check_added_monitors!(nodes[2], 1);
2971
2972         let events = nodes[2].node.get_and_clear_pending_msg_events();
2973         assert_eq!(events.len(), 1);
2974         match events[0] {
2975                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
2976                         assert!(update_add_htlcs.is_empty());
2977                         assert!(!update_fail_htlcs.is_empty());
2978                         assert!(update_fulfill_htlcs.is_empty());
2979                         assert!(update_fail_malformed_htlcs.is_empty());
2980                         assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
2981                 },
2982                 _ => panic!("Unexpected event"),
2983         };
2984         mine_transaction(&nodes[2], &commitment_tx[0]);
2985         check_closed_broadcast!(nodes[2], true);
2986         check_added_monitors!(nodes[2], 1);
2987         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
2988         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
2989         assert_eq!(node_txn.len(), 0);
2990
2991         // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
2992         // Verify that B's ChannelManager is able to detect that the HTLC has timed out via its own tx and fail it backward accordingly
2993         mine_transaction(&nodes[1], &commitment_tx[0]);
2994         check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false);
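             // Connect blocks well past the HTLC's CLTV expiry so nodes[1] broadcasts its timeout claim on C's commitment tx.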
2995         connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
2996         let timeout_tx = {
2997                 let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
2998                 if nodes[1].connect_style.borrow().skips_blocks() {
2999                         assert_eq!(txn.len(), 1);
3000                 } else {
3001                         assert_eq!(txn.len(), 3); // Two extra fee bumps for timeout transaction
3002                 }
3003                 txn.iter().for_each(|tx| check_spends!(tx, commitment_tx[0]));
3004                 assert_eq!(txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3005                 txn.remove(0)
3006         };
3007
3008         mine_transaction(&nodes[1], &timeout_tx);
3009         check_added_monitors!(nodes[1], 1);
3010         check_closed_broadcast!(nodes[1], true);
3011
3012         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3013
3014         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
3015         check_added_monitors!(nodes[1], 1);
3016         let events = nodes[1].node.get_and_clear_pending_msg_events();
3017         assert_eq!(events.len(), 1);
3018         match events[0] {
3019                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3020                         assert!(update_add_htlcs.is_empty());
3021                         assert!(!update_fail_htlcs.is_empty());
3022                         assert!(update_fulfill_htlcs.is_empty());
3023                         assert!(update_fail_malformed_htlcs.is_empty());
3024                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3025                 },
3026                 _ => panic!("Unexpected event"),
3027         };
3028
3029         // Broadcast legit commitment tx from B on A's chain
3030         let commitment_tx = get_local_commitment_txn!(nodes[1], chan_1.2);
3031         check_spends!(commitment_tx[0], chan_1.3);
3032
3033         mine_transaction(&nodes[0], &commitment_tx[0]);
3034         connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
3035
3036         check_closed_broadcast!(nodes[0], true);
3037         check_added_monitors!(nodes[0], 1);
3038         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
3039         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx
3040         assert_eq!(node_txn.len(), 1);
3041         check_spends!(node_txn[0], commitment_tx[0]);
3042         assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3043 }
3044
3045 #[test]
3046 fn test_htlc_on_chain_timeout() {
3047         do_test_htlc_on_chain_timeout(ConnectStyle::BestBlockFirstSkippingBlocks);
3048         do_test_htlc_on_chain_timeout(ConnectStyle::TransactionsFirstSkippingBlocks);
3049         do_test_htlc_on_chain_timeout(ConnectStyle::FullBlockViaListen);
3050 }
3051
3052 #[test]
3053 fn test_simple_commitment_revoked_fail_backward() {
3054         // Test that in case of a revoked commitment tx, we detect the resolution of its outputs by the justice tx
3055         // and fail the HTLC backward accordingly.
3056
3057         let chanmon_cfgs = create_chanmon_cfgs(3);
3058         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3059         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3060         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3061
3062         // Create some initial channels
3063         create_announced_chan_between_nodes(&nodes, 0, 1);
3064         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3065
3066         let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3067         // Get the will-be-revoked local txn from nodes[2]
3068         let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3069         // Revoke the old state
3070         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3071
3072         let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3073
3074         mine_transaction(&nodes[1], &revoked_local_txn[0]);
3075         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
3076         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3077         check_added_monitors!(nodes[1], 1);
3078         check_closed_broadcast!(nodes[1], true);
3079
3080         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
3081         check_added_monitors!(nodes[1], 1);
3082         let events = nodes[1].node.get_and_clear_pending_msg_events();
3083         assert_eq!(events.len(), 1);
3084         match events[0] {
3085                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3086                         assert!(update_add_htlcs.is_empty());
3087                         assert_eq!(update_fail_htlcs.len(), 1);
3088                         assert!(update_fulfill_htlcs.is_empty());
3089                         assert!(update_fail_malformed_htlcs.is_empty());
3090                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3091
3092                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3093                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3094                         expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_2.0.contents.short_channel_id, true);
3095                 },
3096                 _ => panic!("Unexpected event"),
3097         }
3098 }
3099
3100 fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use_dust: bool, no_to_remote: bool) {
3101         // Test that if our counterparty broadcasts a revoked commitment transaction we fail all
3102         // pending HTLCs on that channel backwards even if the HTLCs aren't present in our latest
3103         // commitment transaction anymore.
3104         // To do this, we have the peer which will broadcast a revoked commitment transaction send
3105         // a number of update_fail/commitment_signed updates without ever sending the RAA in
3106         // response to our commitment_signed. This is somewhat misbehavior-y, though not
3107         // technically disallowed and we should probably handle it reasonably.
3108         // Note that this is pretty exhaustive as an outbound HTLC which we haven't yet
3109         // failed/fulfilled backwards must be in at least one of the latest two remote commitment
3110         // transactions:
3111         // * Once we move it out of our holding cell/add it, we will immediately include it in a
3112         //   commitment_signed (implying it will be in the latest remote commitment transaction).
3113         // * Once they remove it, we will send a (the first) commitment_signed without the HTLC,
3114         //   and once they revoke the previous commitment transaction (allowing us to send a new
3115         //   commitment_signed) we will be free to fail/fulfill the HTLC backwards.
3116         let chanmon_cfgs = create_chanmon_cfgs(3);
3117         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3118         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3119         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3120
3121         // Create some initial channels
3122         create_announced_chan_between_nodes(&nodes, 0, 1);
3123         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3124
3125         let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
3126         // Get the will-be-revoked local txn from nodes[2]
3127         let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3128         assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 });
3129         // Revoke the old state
3130         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3131
3132         let value = if use_dust {
3133                 // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
3134                 // well, so HTLCs at exactly the dust limit will not be included in commitment txn.
3135                 nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id())
3136                         .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context.holder_dust_limit_satoshis * 1000
3137         } else { 3000000 };
3138
3139         let (_, first_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3140         let (_, second_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3141         let (_, third_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3142
3143         nodes[2].node.fail_htlc_backwards(&first_payment_hash);
3144         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
3145         check_added_monitors!(nodes[2], 1);
3146         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3147         assert!(updates.update_add_htlcs.is_empty());
3148         assert!(updates.update_fulfill_htlcs.is_empty());
3149         assert!(updates.update_fail_malformed_htlcs.is_empty());
3150         assert_eq!(updates.update_fail_htlcs.len(), 1);
3151         assert!(updates.update_fee.is_none());
3152         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3153         let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
3154         // Drop the last RAA from 3 -> 2
3155
3156         nodes[2].node.fail_htlc_backwards(&second_payment_hash);
3157         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: second_payment_hash }]);
3158         check_added_monitors!(nodes[2], 1);
3159         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3160         assert!(updates.update_add_htlcs.is_empty());
3161         assert!(updates.update_fulfill_htlcs.is_empty());
3162         assert!(updates.update_fail_malformed_htlcs.is_empty());
3163         assert_eq!(updates.update_fail_htlcs.len(), 1);
3164         assert!(updates.update_fee.is_none());
3165         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3166         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3167         check_added_monitors!(nodes[1], 1);
3168         // Note that nodes[1] is in AwaitingRAA, so won't send a CS
3169         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3170         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3171         check_added_monitors!(nodes[2], 1);
3172
3173         nodes[2].node.fail_htlc_backwards(&third_payment_hash);
3174         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: third_payment_hash }]);
3175         check_added_monitors!(nodes[2], 1);
3176         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3177         assert!(updates.update_add_htlcs.is_empty());
3178         assert!(updates.update_fulfill_htlcs.is_empty());
3179         assert!(updates.update_fail_malformed_htlcs.is_empty());
3180         assert_eq!(updates.update_fail_htlcs.len(), 1);
3181         assert!(updates.update_fee.is_none());
3182         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3183         // At this point first_payment_hash has dropped out of the latest two commitment
3184         // transactions that nodes[1] is tracking...
3185         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3186         check_added_monitors!(nodes[1], 1);
3187         // Note that nodes[1] is (still) in AwaitingRAA, so won't send a CS
3188         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3189         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3190         check_added_monitors!(nodes[2], 1);
3191
3192         // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting
3193         // on nodes[2]'s RAA.
3194         let (route, fourth_payment_hash, _, fourth_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 1000000);
3195         nodes[1].node.send_payment_with_route(&route, fourth_payment_hash,
3196                 RecipientOnionFields::secret_only(fourth_payment_secret), PaymentId(fourth_payment_hash.0)).unwrap();
3197         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3198         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3199         check_added_monitors!(nodes[1], 0);
3200
3201         if deliver_bs_raa {
3202                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_raa);
3203                 // One monitor for the new revocation preimage, no second one as we won't generate a new
3204                 // commitment transaction for nodes[0] until process_pending_htlc_forwards().
3205                 check_added_monitors!(nodes[1], 1);
3206                 let events = nodes[1].node.get_and_clear_pending_events();
3207                 assert_eq!(events.len(), 2);
3208                 match events[0] {
3209                         Event::PendingHTLCsForwardable { .. } => { },
3210                         _ => panic!("Unexpected event"),
3211                 };
3212                 match events[1] {
3213                         Event::HTLCHandlingFailed { .. } => { },
3214                         _ => panic!("Unexpected event"),
3215                 }
3216                 // Deliberately don't process the pending fail-back so they all fail back at once after
3217                 // block connection just like the !deliver_bs_raa case
3218         }
3219
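             // Collect the payment hashes seen in the failure events below so we can verify all three HTLCs were failed back.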
3220         let mut failed_htlcs = HashSet::new();
3221         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3222
3223         mine_transaction(&nodes[1], &revoked_local_txn[0]);
3224         check_added_monitors!(nodes[1], 1);
3225         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3226
3227         let events = nodes[1].node.get_and_clear_pending_events();
3228         assert_eq!(events.len(), if deliver_bs_raa { 3 + nodes.len() - 1 } else { 4 + nodes.len() });
3229         match events[0] {
3230                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
3231                 _ => panic!("Unexpected event"),
3232         }
3233         match events[1] {
3234                 Event::PaymentPathFailed { ref payment_hash, .. } => {
3235                         assert_eq!(*payment_hash, fourth_payment_hash);
3236                 },
3237                 _ => panic!("Unexpected event"),
3238         }
3239         match events[2] {
3240                 Event::PaymentFailed { ref payment_hash, .. } => {
3241                         assert_eq!(*payment_hash, fourth_payment_hash);
3242                 },
3243                 _ => panic!("Unexpected event"),
3244         }
3245
3246         nodes[1].node.process_pending_htlc_forwards();
3247         check_added_monitors!(nodes[1], 1);
3248
3249         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
3250         assert_eq!(events.len(), if deliver_bs_raa { 4 } else { 3 });
3251
3252         if deliver_bs_raa {
3253                 let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
3254                 match nodes_2_event {
3255                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3256                                 assert_eq!(nodes[2].node.get_our_node_id(), *node_id);
3257                                 assert_eq!(update_add_htlcs.len(), 1);
3258                                 assert!(update_fulfill_htlcs.is_empty());
3259                                 assert!(update_fail_htlcs.is_empty());
3260                                 assert!(update_fail_malformed_htlcs.is_empty());
3261                         },
3262                         _ => panic!("Unexpected event"),
3263                 }
3264         }
3265
3266         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
3267         match nodes_2_event {
3268                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, ref data } }, node_id: _ } => {
3269                         assert_eq!(channel_id, chan_2.2);
3270                         assert_eq!(data.as_str(), "Channel closed because commitment or closing transaction was confirmed on chain.");
3271                 },
3272                 _ => panic!("Unexpected event"),
3273         }
3274
3275         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
3276         match nodes_0_event {
3277                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3278                         assert!(update_add_htlcs.is_empty());
3279                         assert_eq!(update_fail_htlcs.len(), 3);
3280                         assert!(update_fulfill_htlcs.is_empty());
3281                         assert!(update_fail_malformed_htlcs.is_empty());
3282                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3283
3284                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3285                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[1]);
3286                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[2]);
3287
3288                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3289
3290                         let events = nodes[0].node.get_and_clear_pending_events();
3291                         assert_eq!(events.len(), 6);
3292                         match events[0] {
3293                                 Event::PaymentPathFailed { ref payment_hash, ref failure, .. } => {
3294                                         assert!(failed_htlcs.insert(payment_hash.0));
3295                                         // If we delivered B's RAA we got an unknown preimage error, not something
3296                                         // that we should update our routing table for.
3297                                         if !deliver_bs_raa {
3298                                                 if let PathFailure::OnPath { network_update: Some(_) } = failure { } else { panic!("Unexpected path failure") }
3299                                         }
3300                                 },
3301                                 _ => panic!("Unexpected event"),
3302                         }
3303                         match events[1] {
3304                                 Event::PaymentFailed { ref payment_hash, .. } => {
3305                                         assert_eq!(*payment_hash, first_payment_hash);
3306                                 },
3307                                 _ => panic!("Unexpected event"),
3308                         }
3309                         match events[2] {
3310                                 Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
3311                                         assert!(failed_htlcs.insert(payment_hash.0));
3312                                 },
3313                                 _ => panic!("Unexpected event"),
3314                         }
3315                         match events[3] {
3316                                 Event::PaymentFailed { ref payment_hash, .. } => {
3317                                         assert_eq!(*payment_hash, second_payment_hash);
3318                                 },
3319                                 _ => panic!("Unexpected event"),
3320                         }
3321                         match events[4] {
3322                                 Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
3323                                         assert!(failed_htlcs.insert(payment_hash.0));
3324                                 },
3325                                 _ => panic!("Unexpected event"),
3326                         }
3327                         match events[5] {
3328                                 Event::PaymentFailed { ref payment_hash, .. } => {
3329                                         assert_eq!(*payment_hash, third_payment_hash);
3330                                 },
3331                                 _ => panic!("Unexpected event"),
3332                         }
3333                 },
3334                 _ => panic!("Unexpected event"),
3335         }
3336
3337         // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
3338         match events[0] {
3339                 MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
3340                 _ => panic!("Unexpected event"),
3341         }
3342
3343         assert!(failed_htlcs.contains(&first_payment_hash.0));
3344         assert!(failed_htlcs.contains(&second_payment_hash.0));
3345         assert!(failed_htlcs.contains(&third_payment_hash.0));
3346 }
3347
3348 #[test]
3349 fn test_commitment_revoked_fail_backward_exhaustive_a() {
3350         do_test_commitment_revoked_fail_backward_exhaustive(false, true, false);
3351         do_test_commitment_revoked_fail_backward_exhaustive(true, true, false);
3352         do_test_commitment_revoked_fail_backward_exhaustive(false, false, false);
3353         do_test_commitment_revoked_fail_backward_exhaustive(true, false, false);
3354 }
3355
3356 #[test]
3357 fn test_commitment_revoked_fail_backward_exhaustive_b() {
3358         do_test_commitment_revoked_fail_backward_exhaustive(false, true, true);
3359         do_test_commitment_revoked_fail_backward_exhaustive(true, true, true);
3360         do_test_commitment_revoked_fail_backward_exhaustive(false, false, true);
3361         do_test_commitment_revoked_fail_backward_exhaustive(true, false, true);
3362 }
3363
3364 #[test]
3365 fn fail_backward_pending_htlc_upon_channel_failure() {
3366         let chanmon_cfgs = create_chanmon_cfgs(2);
3367         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3368         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3369         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3370         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
3371
3372         // Alice -> Bob: Route a payment but without Bob sending revoke_and_ack.
3373         {
3374                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000);
3375                 nodes[0].node.send_payment_with_route(&route, payment_hash, RecipientOnionFields::secret_only(payment_secret),
3376                         PaymentId(payment_hash.0)).unwrap();
3377                 check_added_monitors!(nodes[0], 1);
3378
3379                 let payment_event = {
3380                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3381                         assert_eq!(events.len(), 1);
3382                         SendEvent::from_event(events.remove(0))
3383                 };
3384                 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
3385                 assert_eq!(payment_event.msgs.len(), 1);
3386         }
3387
3388         // Alice -> Bob: Route another payment but now Alice waits for Bob's earlier revoke_and_ack.
3389         let (route, failed_payment_hash, _, failed_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000);
3390         {
3391                 nodes[0].node.send_payment_with_route(&route, failed_payment_hash,
3392                         RecipientOnionFields::secret_only(failed_payment_secret), PaymentId(failed_payment_hash.0)).unwrap();
3393                 check_added_monitors!(nodes[0], 0);
3394
3395                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3396         }
3397
3398         // Alice <- Bob: Send a malformed update_add_htlc so Alice fails the channel.
3399         {
3400                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 50_000);
3401
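                     // Manually construct a valid onion so that the only protocol violation in the
                     // update_add_htlc below is its 0-msat amount.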
3402                 let secp_ctx = Secp256k1::new();
3403                 let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
3404                 let current_height = nodes[1].node.best_block.read().unwrap().height() + 1;
3405                 let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(
3406                         &route.paths[0], 50_000, RecipientOnionFields::secret_only(payment_secret), current_height, &None).unwrap();
3407                 let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
3408                 let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
3409
3410                 // Send a 0-msat update_add_htlc to fail the channel.
3411                 let update_add_htlc = msgs::UpdateAddHTLC {
3412                         channel_id: chan.2,
3413                         htlc_id: 0,
3414                         amount_msat: 0,
3415                         payment_hash,
3416                         cltv_expiry,
3417                         onion_routing_packet,
3418                         skimmed_fee_msat: None,
3419                 };
3420                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
3421         }
3422         let events = nodes[0].node.get_and_clear_pending_events();
3423         assert_eq!(events.len(), 3);
3424         // Check that Alice fails backward the pending HTLC from the second payment.
3425         match events[0] {
3426                 Event::PaymentPathFailed { payment_hash, .. } => {
3427                         assert_eq!(payment_hash, failed_payment_hash);
3428                 },
3429                 _ => panic!("Unexpected event"),
3430         }
3431         match events[1] {
3432                 Event::PaymentFailed { payment_hash, .. } => {
3433                         assert_eq!(payment_hash, failed_payment_hash);
3434                 },
3435                 _ => panic!("Unexpected event"),
3436         }
3437         match events[2] {
3438                 Event::ChannelClosed { reason: ClosureReason::ProcessingError { ref err }, .. } => {
3439                         assert_eq!(err, "Remote side tried to send a 0-msat HTLC");
3440                 },
3441                 _ => panic!("Unexpected event {:?}", events[2]),
3442         }
3443         check_closed_broadcast!(nodes[0], true);
3444         check_added_monitors!(nodes[0], 1);
3445 }
3446
3447 #[test]
3448 fn test_htlc_ignore_latest_remote_commitment() {
3449         // Test that HTLC transactions spending the latest remote commitment transaction are simply
3450         // ignored if we cannot claim them. This originally tickled an invalid unwrap().
3451         let chanmon_cfgs = create_chanmon_cfgs(2);
3452         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3453         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3454         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3455         if *nodes[1].connect_style.borrow() == ConnectStyle::FullBlockViaListen {
3456                 // We rely on the ability to connect a block redundantly, which isn't allowed via
3457                 // `chain::Listen`, so we never run the test if we randomly get assigned that
3458                 // connect_style.
3459                 return;
3460         }
3461         create_announced_chan_between_nodes(&nodes, 0, 1);
3462
3463         route_payment(&nodes[0], &[&nodes[1]], 10000000);
3464         nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
3465         connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
3466         check_closed_broadcast!(nodes[0], true);
3467         check_added_monitors!(nodes[0], 1);
3468         check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
3469
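             // The force-closed commitment transaction is broadcast twice here (node_txn[0] and node_txn[1]
             // share a txid), plus a third claim transaction.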
3470         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
3471         assert_eq!(node_txn.len(), 3);
3472         assert_eq!(node_txn[0].txid(), node_txn[1].txid());
3473
3474         let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[1].clone()]);
3475         connect_block(&nodes[1], &block);
3476         check_closed_broadcast!(nodes[1], true);
3477         check_added_monitors!(nodes[1], 1);
3478         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
3479
3480         // Duplicate the connect_block call since this may happen due to other listeners
3481         // registering new transactions
3482         connect_block(&nodes[1], &block);
3483 }
3484
3485 #[test]
3486 fn test_force_close_fail_back() {
3487         // Check which HTLCs are failed-backwards on channel force-closure
3488         let chanmon_cfgs = create_chanmon_cfgs(3);
3489         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3490         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3491         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3492         create_announced_chan_between_nodes(&nodes, 0, 1);
3493         create_announced_chan_between_nodes(&nodes, 1, 2);
3494
3495         let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
3496
3497         let mut payment_event = {
3498                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
3499                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
3500                 check_added_monitors!(nodes[0], 1);
3501
3502                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3503                 assert_eq!(events.len(), 1);
3504                 SendEvent::from_event(events.remove(0))
3505         };
3506
3507         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3508         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
3509
3510         expect_pending_htlcs_forwardable!(nodes[1]);
3511
3512         let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
3513         assert_eq!(events_2.len(), 1);
3514         payment_event = SendEvent::from_event(events_2.remove(0));
3515         assert_eq!(payment_event.msgs.len(), 1);
3516
3517         check_added_monitors!(nodes[1], 1);
3518         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
3519         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
3520         check_added_monitors!(nodes[2], 1);
3521         let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3522
3523         // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
3524         // state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
3525         // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
3526
3527         nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
3528         check_closed_broadcast!(nodes[2], true);
3529         check_added_monitors!(nodes[2], 1);
3530         check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
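             // Grab nodes[2]'s broadcast commitment transaction so it can be confirmed on nodes[1]'s chain.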
3531         let tx = {
3532                 let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
3533                 // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
3534                 // have a use for it unless nodes[2] learns the preimage somehow; the funds will go
3535                 // back to nodes[1] upon timeout otherwise.
3536                 assert_eq!(node_txn.len(), 1);
3537                 node_txn.remove(0)
3538         };
3539
3540         mine_transaction(&nodes[1], &tx);
3541
3542         // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
3543         check_closed_broadcast!(nodes[1], true);
3544         check_added_monitors!(nodes[1], 1);
3545         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
3546
3547         // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
3548         {
3549                 get_monitor!(nodes[2], payment_event.commitment_msg.channel_id)
3550                         .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger);
3551         }
3552         mine_transaction(&nodes[2], &tx);
3553         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
3554         assert_eq!(node_txn.len(), 1);
3555         assert_eq!(node_txn[0].input.len(), 1);
3556         assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
3557         assert_eq!(node_txn[0].lock_time.0, 0); // Must be an HTLC-Success
3558         assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
3559
3560         check_spends!(node_txn[0], tx);
3561 }
3562
3563 #[test]
3564 fn test_dup_events_on_peer_disconnect() {
3565         // Test that if we receive a duplicative update_fulfill_htlc message after a reconnect we do
3566         // not generate a corresponding duplicative PaymentSent event. This did not use to be the case
3567         // as we used to generate the event immediately upon receipt of the payment preimage in the
3568         // update_fulfill_htlc message.
3569
3570         let chanmon_cfgs = create_chanmon_cfgs(2);
3571         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3572         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3573         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3574         create_announced_chan_between_nodes(&nodes, 0, 1);
3575
3576         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
3577
3578         nodes[1].node.claim_funds(payment_preimage);
3579         expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
3580         check_added_monitors!(nodes[1], 1);
3581         let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3582         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
3583         expect_payment_sent_without_paths!(nodes[0], payment_preimage);
3584
3585         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3586         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3587
3588         let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3589         reconnect_args.pending_htlc_claims.0 = 1;
3590         reconnect_nodes(reconnect_args);
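             // The reconnect re-delivers the update_fulfill_htlc; the only new event should be the
             // PaymentPathSuccessful checked below, not a second PaymentSent.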
3591         expect_payment_path_successful!(nodes[0]);
3592 }
3593
3594 #[test]
3595 fn test_peer_disconnected_before_funding_broadcasted() {
3596         // Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
3597         // before the funding transaction has been broadcasted.
3598         let chanmon_cfgs = create_chanmon_cfgs(2);
3599         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3600         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3601         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3602
3603         // Open a channel between `nodes[0]` and `nodes[1]`, for which the funding transaction is never
3604         // broadcasted, even though it's created by `nodes[0]`.
3605         let expected_temporary_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
3606         let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
3607         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
3608         let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
3609         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
3610
3611         let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
3612         assert_eq!(temporary_channel_id, expected_temporary_channel_id);
3613
3614         assert!(nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
3615
3616         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
3617         assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id);
3618
3619         // Even though the funding transaction is created by `nodes[0]`, the `FundingCreated` msg is
3620         // never sent to `nodes[1]`, and therefore the tx is never signed by either party nor
3621         // broadcasted.
3622         {
3623                 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
3624         }
3625
3626         // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` when the peers are
3627         // disconnected before the funding transaction was broadcasted.
3628         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3629         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3630
3631         check_closed_event(&nodes[0], 1, ClosureReason::DisconnectedPeer, false);
3632         check_closed_event(&nodes[1], 1, ClosureReason::DisconnectedPeer, false);
3633 }
3634
3635 #[test]
3636 fn test_simple_peer_disconnect() {
3637         // Test that we can reconnect when there are no lost messages
3638         let chanmon_cfgs = create_chanmon_cfgs(3);
3639         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3640         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3641         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3642         create_announced_chan_between_nodes(&nodes, 0, 1);
3643         create_announced_chan_between_nodes(&nodes, 1, 2);
3644
3645         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3646         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3647         let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3648         reconnect_args.send_channel_ready = (true, true);
3649         reconnect_nodes(reconnect_args);
3650
3651         let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3652         let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3653         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
3654         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);
3655
3656         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3657         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3658         reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3659
3660         let (payment_preimage_3, payment_hash_3, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
3661         let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3662         let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3663         let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3664
3665         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3666         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3667
3668         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
3669         fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);
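             // Since nodes[0] and nodes[1] are disconnected, the `true` (skip_last) argument above means
             // the final fulfill/fail towards nodes[0] is not delivered yet; the reconnect below (with one
             // pending cell claim and one pending cell fail) delivers it, producing the four events
             // checked next.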
3670
3671         let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3672         reconnect_args.pending_cell_htlc_fails.0 = 1;
3673         reconnect_args.pending_cell_htlc_claims.0 = 1;
3674         reconnect_nodes(reconnect_args);
3675         {
3676                 let events = nodes[0].node.get_and_clear_pending_events();
3677                 assert_eq!(events.len(), 4);
3678                 match events[0] {
3679                         Event::PaymentSent { payment_preimage, payment_hash, .. } => {
3680                                 assert_eq!(payment_preimage, payment_preimage_3);
3681                                 assert_eq!(payment_hash, payment_hash_3);
3682                         },
3683                         _ => panic!("Unexpected event"),
3684                 }
3685                 match events[1] {
3686                         Event::PaymentPathSuccessful { .. } => {},
3687                         _ => panic!("Unexpected event"),
3688                 }
3689                 match events[2] {
3690                         Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } => {
3691                                 assert_eq!(payment_hash, payment_hash_5);
3692                                 assert!(payment_failed_permanently);
3693                         },
3694                         _ => panic!("Unexpected event"),
3695                 }
3696                 match events[3] {
3697                         Event::PaymentFailed { payment_hash, .. } => {
3698                                 assert_eq!(payment_hash, payment_hash_5);
3699                         },
3700                         _ => panic!("Unexpected event"),
3701                 }
3702         }
3703
3704         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
3705         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
3706 }
3707
3708 fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken_lnd: bool) {
3709         // Test that we can reconnect when in-flight HTLC updates get dropped
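             // `messages_delivered` controls how far through the update_add_htlc / commitment_signed /
             // revoke_and_ack exchange each payment gets before the peers disconnect (0 meaning even the
             // initial channel_ready is never delivered), per the branches below.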
3710         let chanmon_cfgs = create_chanmon_cfgs(2);
3711         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3712         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3713         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3714
3715         let mut as_channel_ready = None;
3716         let channel_id = if messages_delivered == 0 {
3717                 let (channel_ready, chan_id, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001);
3718                 as_channel_ready = Some(channel_ready);
3719                 // nodes[1] doesn't receive the channel_ready message (it'll be re-sent on reconnect)
3720                 // Note that we store it so that if we're running with `simulate_broken_lnd` we can deliver
3721                 // it before the channel_reestablish message.
3722                 chan_id
3723         } else {
3724                 create_announced_chan_between_nodes(&nodes, 0, 1).2
3725         };
3726
3727         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000);
3728
3729         let payment_event = {
3730                 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
3731                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
3732                 check_added_monitors!(nodes[0], 1);
3733
3734                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3735                 assert_eq!(events.len(), 1);
3736                 SendEvent::from_event(events.remove(0))
3737         };
3738         assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);
3739
3740         if messages_delivered < 2 {
3741                 // Drop the payment_event messages, and let them get re-generated in reconnect_nodes!
3742         } else {
3743                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3744                 if messages_delivered >= 3 {
3745                         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
3746                         check_added_monitors!(nodes[1], 1);
3747                         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3748
3749                         if messages_delivered >= 4 {
3750                                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
3751                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3752                                 check_added_monitors!(nodes[0], 1);
3753
3754                                 if messages_delivered >= 5 {
3755                                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
3756                                         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
3757                                         // No commitment_signed so get_event_msg's assert(len == 1) passes
3758                                         check_added_monitors!(nodes[0], 1);
3759
3760                                         if messages_delivered >= 6 {
3761                                                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
3762                                                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3763                                                 check_added_monitors!(nodes[1], 1);
3764                                         }
3765                                 }
3766                         }
3767                 }
3768         }
3769
3770         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3771         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3772         if messages_delivered < 3 {
3773                 if simulate_broken_lnd {
3774                         // lnd has a long-standing bug where they send a channel_ready prior to a
3775                         // channel_reestablish if you reconnect prior to channel_ready time.
3776                         //
3777                         // Here we simulate that behavior, delivering a channel_ready immediately on
3778                         // reconnect. Note that we don't bother skipping the now-duplicate channel_ready sent
3779                         // in `reconnect_nodes` but we currently don't fail based on that.
3780                         //
3781                         // See-also <https://github.com/lightningnetwork/lnd/issues/4006>
3782                         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready.as_ref().unwrap().0);
3783                 }
3784                 // Even if the channel_ready messages get exchanged, as long as nothing further was
3785                 // received on either side, both sides will need to resend them.
3786                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3787                 reconnect_args.send_channel_ready = (true, true);
3788                 reconnect_args.pending_htlc_adds.1 = 1;
3789                 reconnect_nodes(reconnect_args);
3790         } else if messages_delivered == 3 {
3791                 // nodes[0] still wants its RAA + commitment_signed
3792                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3793                 reconnect_args.pending_htlc_adds.0 = -1;
3794                 reconnect_args.pending_raa.0 = true;
3795                 reconnect_nodes(reconnect_args);
3796         } else if messages_delivered == 4 {
3797                 // nodes[0] still wants its commitment_signed
3798                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3799                 reconnect_args.pending_htlc_adds.0 = -1;
3800                 reconnect_nodes(reconnect_args);
3801         } else if messages_delivered == 5 {
3802                 // nodes[1] still wants its final RAA
3803                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3804                 reconnect_args.pending_raa.1 = true;
3805                 reconnect_nodes(reconnect_args);
3806         } else if messages_delivered == 6 {
3807                 // Everything was delivered...
3808                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3809         }
3810
3811         let events_1 = nodes[1].node.get_and_clear_pending_events();
3812         if messages_delivered == 0 {
3813                 assert_eq!(events_1.len(), 2);
3814                 match events_1[0] {
3815                         Event::ChannelReady { .. } => { },
3816                         _ => panic!("Unexpected event"),
3817                 };
3818                 match events_1[1] {
3819                         Event::PendingHTLCsForwardable { .. } => { },
3820                         _ => panic!("Unexpected event"),
3821                 };
3822         } else {
3823                 assert_eq!(events_1.len(), 1);
3824                 match events_1[0] {
3825                         Event::PendingHTLCsForwardable { .. } => { },
3826                         _ => panic!("Unexpected event"),
3827                 };
3828         }
3829
3830         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3831         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3832         reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3833
3834         nodes[1].node.process_pending_htlc_forwards();
3835
3836         let events_2 = nodes[1].node.get_and_clear_pending_events();
3837         assert_eq!(events_2.len(), 1);
3838         match events_2[0] {
3839                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
3840                         assert_eq!(payment_hash_1, *payment_hash);
3841                         assert_eq!(amount_msat, 1_000_000);
3842                         assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
3843                         assert_eq!(via_channel_id, Some(channel_id));
3844                         match &purpose {
3845                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
3846                                         assert!(payment_preimage.is_none());
3847                                         assert_eq!(payment_secret_1, *payment_secret);
3848                                 },
3849                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
3850                         }
3851                 },
3852                 _ => panic!("Unexpected event"),
3853         }
3854
3855         nodes[1].node.claim_funds(payment_preimage_1);
3856         check_added_monitors!(nodes[1], 1);
3857         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
3858
3859         let events_3 = nodes[1].node.get_and_clear_pending_msg_events();
3860         assert_eq!(events_3.len(), 1);
3861         let (update_fulfill_htlc, commitment_signed) = match events_3[0] {
3862                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
3863                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
3864                         assert!(updates.update_add_htlcs.is_empty());
3865                         assert!(updates.update_fail_htlcs.is_empty());
3866                         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
3867                         assert!(updates.update_fail_malformed_htlcs.is_empty());
3868                         assert!(updates.update_fee.is_none());
3869                         (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
3870                 },
3871                 _ => panic!("Unexpected event"),
3872         };
3873
3874         if messages_delivered >= 1 {
3875                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc);
3876
3877                 let events_4 = nodes[0].node.get_and_clear_pending_events();
3878                 assert_eq!(events_4.len(), 1);
3879                 match events_4[0] {
3880                         Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
3881                                 assert_eq!(payment_preimage_1, *payment_preimage);
3882                                 assert_eq!(payment_hash_1, *payment_hash);
3883                         },
3884                         _ => panic!("Unexpected event"),
3885                 }
3886
3887                 if messages_delivered >= 2 {
3888                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
3889                         check_added_monitors!(nodes[0], 1);
3890                         let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
3891
3892                         if messages_delivered >= 3 {
3893                                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
3894                                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3895                                 check_added_monitors!(nodes[1], 1);
3896
3897                                 if messages_delivered >= 4 {
3898                                         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed);
3899                                         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
3900                                         // No commitment_signed so get_event_msg's assert(len == 1) passes
3901                                         check_added_monitors!(nodes[1], 1);
3902
3903                                         if messages_delivered >= 5 {
3904                                                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
3905                                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3906                                                 check_added_monitors!(nodes[0], 1);
3907                                         }
3908                                 }
3909                         }
3910                 }
3911         }
3912
3913         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3914         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3915         if messages_delivered < 2 {
3916                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3917                 reconnect_args.pending_htlc_claims.0 = 1;
3918                 reconnect_nodes(reconnect_args);
3919                 if messages_delivered < 1 {
3920                         expect_payment_sent!(nodes[0], payment_preimage_1);
3921                 } else {
3922                         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3923                 }
3924         } else if messages_delivered == 2 {
3925                 // nodes[0] still wants its RAA + commitment_signed
3926                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3927                 reconnect_args.pending_htlc_adds.1 = -1;
3928                 reconnect_args.pending_raa.1 = true;
3929                 reconnect_nodes(reconnect_args);
3930         } else if messages_delivered == 3 {
3931                 // nodes[0] still wants its commitment_signed
3932                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3933                 reconnect_args.pending_htlc_adds.1 = -1;
3934                 reconnect_nodes(reconnect_args);
3935         } else if messages_delivered == 4 {
3936                 // nodes[1] still wants its final RAA
3937                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3938                 reconnect_args.pending_raa.0 = true;
3939                 reconnect_nodes(reconnect_args);
3940         } else if messages_delivered == 5 {
3941                 // Everything was delivered...
3942                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3943         }
3944
3945         if messages_delivered == 1 || messages_delivered == 2 {
3946                 expect_payment_path_successful!(nodes[0]);
3947         }
3948         if messages_delivered <= 5 {
3949                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3950                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3951         }
3952         reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3953
3954         if messages_delivered > 2 {
3955                 expect_payment_path_successful!(nodes[0]);
3956         }
3957
3958         // Channel should still work fine...
3959         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
3960         let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
3961         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
3962 }
3963
3964 #[test]
3965 fn test_drop_messages_peer_disconnect_a() {
3966         do_test_drop_messages_peer_disconnect(0, true);
3967         do_test_drop_messages_peer_disconnect(0, false);
3968         do_test_drop_messages_peer_disconnect(1, false);
3969         do_test_drop_messages_peer_disconnect(2, false);
3970 }
3971
3972 #[test]
3973 fn test_drop_messages_peer_disconnect_b() {
3974         do_test_drop_messages_peer_disconnect(3, false);
3975         do_test_drop_messages_peer_disconnect(4, false);
3976         do_test_drop_messages_peer_disconnect(5, false);
3977         do_test_drop_messages_peer_disconnect(6, false);
3978 }
3979
3980 #[test]
3981 fn test_channel_ready_without_best_block_updated() {
3982         // Previously, if we were offline when a funding transaction was locked in, and then we came
3983         // back online, calling best_block_updated once followed by transactions_confirmed, we'd not
3984         // generate a channel_ready until a later best_block_updated. This tests that we generate the
3985         // channel_ready immediately instead.
3986         let chanmon_cfgs = create_chanmon_cfgs(2);
3987         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3988         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3989         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3990         *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
3991
3992         let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
3993
3994         let conf_height = nodes[0].best_block_info().1 + 1;
3995         connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
3996         let block_txn = [funding_tx];
3997         let conf_txn: Vec<_> = block_txn.iter().enumerate().collect();
3998         let conf_block_header = nodes[0].get_block_header(conf_height);
3999         nodes[0].node.transactions_confirmed(&conf_block_header, &conf_txn[..], conf_height);
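             // Note the ordering: connect_blocks above (with the BestBlockFirstSkippingBlocks style) moved
             // the best block past conf_height without delivering the funding confirmation, which only
             // arrives now via transactions_confirmed, mimicking a node that was offline when the funding
             // transaction was locked in.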
4000
4001         // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
4002         let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
4003         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
4004 }
4005
4006 #[test]
4007 fn test_drop_messages_peer_disconnect_dual_htlc() {
4008         // Test that we can handle reconnecting when both sides of a channel have pending
4009         // commitment_updates when we disconnect.
4010         let chanmon_cfgs = create_chanmon_cfgs(2);
4011         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4012         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4013         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4014         create_announced_chan_between_nodes(&nodes, 0, 1);
4015
4016         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
4017
4018         // Now try to send a second payment which will fail to send
4019         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
4020         nodes[0].node.send_payment_with_route(&route, payment_hash_2,
4021                 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
4022         check_added_monitors!(nodes[0], 1);
4023
4024         let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
4025         assert_eq!(events_1.len(), 1);
4026         match events_1[0] {
4027                 MessageSendEvent::UpdateHTLCs { .. } => {},
4028                 _ => panic!("Unexpected event"),
4029         }
4030
4031         nodes[1].node.claim_funds(payment_preimage_1);
4032         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
4033         check_added_monitors!(nodes[1], 1);
4034
4035         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
4036         assert_eq!(events_2.len(), 1);
4037         match events_2[0] {
4038                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
4039                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
4040                         assert!(update_add_htlcs.is_empty());
4041                         assert_eq!(update_fulfill_htlcs.len(), 1);
4042                         assert!(update_fail_htlcs.is_empty());
4043                         assert!(update_fail_malformed_htlcs.is_empty());
4044                         assert!(update_fee.is_none());
4045
4046                         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
4047                         let events_3 = nodes[0].node.get_and_clear_pending_events();
4048                         assert_eq!(events_3.len(), 1);
4049                         match events_3[0] {
4050                                 Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
4051                                         assert_eq!(*payment_preimage, payment_preimage_1);
4052                                         assert_eq!(*payment_hash, payment_hash_1);
4053                                 },
4054                                 _ => panic!("Unexpected event"),
4055                         }
4056
4057                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
4058                         let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4059                         // No commitment_signed so get_event_msg's assert(len == 1) passes
4060                         check_added_monitors!(nodes[0], 1);
4061                 },
4062                 _ => panic!("Unexpected event"),
4063         }
4064
4065         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4066         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4067
4068         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
4069                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
4070         }, true).unwrap();
4071         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
4072         assert_eq!(reestablish_1.len(), 1);
4073         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
4074                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
4075         }, false).unwrap();
4076         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
4077         assert_eq!(reestablish_2.len(), 1);
4078
4079         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
4080         let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
4081         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
4082         let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
4083
4084         assert!(as_resp.0.is_none());
4085         assert!(bs_resp.0.is_none());
4086
4087         assert!(bs_resp.1.is_none());
4088         assert!(bs_resp.2.is_none());
4089
4090         assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
4091
4092         assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1);
4093         assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty());
4094         assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
4095         assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
4096         assert!(as_resp.2.as_ref().unwrap().update_fee.is_none());
4097         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]);
4098         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed);
4099         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4100         // No commitment_signed so get_event_msg's assert(len == 1) passes
4101         check_added_monitors!(nodes[1], 1);
4102
4103         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap());
4104         let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4105         assert!(bs_second_commitment_signed.update_add_htlcs.is_empty());
4106         assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty());
4107         assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty());
4108         assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty());
4109         assert!(bs_second_commitment_signed.update_fee.is_none());
4110         check_added_monitors!(nodes[1], 1);
4111
4112         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
4113         let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4114         assert!(as_commitment_signed.update_add_htlcs.is_empty());
4115         assert!(as_commitment_signed.update_fulfill_htlcs.is_empty());
4116         assert!(as_commitment_signed.update_fail_htlcs.is_empty());
4117         assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty());
4118         assert!(as_commitment_signed.update_fee.is_none());
4119         check_added_monitors!(nodes[0], 1);
4120
4121         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed);
4122         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4123         // No commitment_signed so get_event_msg's assert(len == 1) passes
4124         check_added_monitors!(nodes[0], 1);
4125
4126         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed);
4127         let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4128         // No commitment_signed so get_event_msg's assert(len == 1) passes
4129         check_added_monitors!(nodes[1], 1);
4130
4131         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
4132         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4133         check_added_monitors!(nodes[1], 1);
4134
4135         expect_pending_htlcs_forwardable!(nodes[1]);
4136
4137         let events_5 = nodes[1].node.get_and_clear_pending_events();
4138         assert_eq!(events_5.len(), 1);
4139         match events_5[0] {
4140                 Event::PaymentClaimable { ref payment_hash, ref purpose, .. } => {
4141                         assert_eq!(payment_hash_2, *payment_hash);
4142                         match &purpose {
4143                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
4144                                         assert!(payment_preimage.is_none());
4145                                         assert_eq!(payment_secret_2, *payment_secret);
4146                                 },
4147                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
4148                         }
4149                 },
4150                 _ => panic!("Unexpected event"),
4151         }
4152
4153         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
4154         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4155         check_added_monitors!(nodes[0], 1);
4156
4157         expect_payment_path_successful!(nodes[0]);
4158         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
4159 }
4160
4161 fn do_test_htlc_timeout(send_partial_mpp: bool) {
4162         // If the user fails to claim/fail an HTLC within the HTLC CLTV timeout we fail it for them
4163         // to avoid our counterparty failing the channel.
4164         let chanmon_cfgs = create_chanmon_cfgs(2);
4165         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4166         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4167         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4168
4169         create_announced_chan_between_nodes(&nodes, 0, 1);
4170
4171         let our_payment_hash = if send_partial_mpp {
4172                 let (route, our_payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000);
4173                 // Use the utility function send_payment_along_path to send the payment with MPP data which
4174                 // indicates there are more HTLCs coming.
4175                 let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
4176                 let payment_id = PaymentId([42; 32]);
4177                 let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
4178                         RecipientOnionFields::secret_only(payment_secret), payment_id, &route).unwrap();
4179                 nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash,
4180                         RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id,
4181                         &None, session_privs[0]).unwrap();
4182                 check_added_monitors!(nodes[0], 1);
4183                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
4184                 assert_eq!(events.len(), 1);
4185                 // Now do the relevant commitment_signed/RAA dances along the path, noting that the final
4186                 // hop should *not* yet generate any PaymentClaimable event(s).
4187                 pass_along_path(&nodes[0], &[&nodes[1]], 100000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
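                     // Because only 100k msat of the 200k msat total has arrived, nodes[1] holds the partial
                     // HTLC waiting for the rest of the MPP payment and generates no PaymentClaimable; the
                     // timeout logic exercised below must still fail it back.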
4188                 our_payment_hash
4189         } else {
4190                 route_payment(&nodes[0], &[&nodes[1]], 100000).1
4191         };
4192
4193         let mut block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
4194         connect_block(&nodes[0], &block);
4195         connect_block(&nodes[1], &block);
4196         let block_count = TEST_FINAL_CLTV + CHAN_CONFIRM_DEPTH + 2 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS;
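             // Connect blocks until the HTLC is within CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS of
             // its expiry (TEST_FINAL_CLTV blocks after the send height), the point at which we give up
             // waiting for the user and fail the HTLC back ourselves.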
4197         for _ in CHAN_CONFIRM_DEPTH + 2..block_count {
4198                 block.header.prev_blockhash = block.block_hash();
4199                 connect_block(&nodes[0], &block);
4200                 connect_block(&nodes[1], &block);
4201         }
4202
4203         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
4204
4205         check_added_monitors!(nodes[1], 1);
4206         let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4207         assert!(htlc_timeout_updates.update_add_htlcs.is_empty());
4208         assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1);
4209         assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty());
4210         assert!(htlc_timeout_updates.update_fee.is_none());
4211
4212         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]);
4213         commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false);
4214         // 100_000 msat as u64, followed by the height at which we failed back above
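             // (0x4000 | 15 below is PERM | incorrect_or_unknown_payment_details; per BOLT 4 its failure
             // data is the HTLC amount in msat followed by the current block height.)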
4215         let mut expected_failure_data = (100_000 as u64).to_be_bytes().to_vec();
4216         expected_failure_data.extend_from_slice(&(block_count - 1).to_be_bytes());
4217         expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]);
4218 }
4219
4220 #[test]
4221 fn test_htlc_timeout() {
4222         do_test_htlc_timeout(true);
4223         do_test_htlc_timeout(false);
4224 }
4225
4226 fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
4227         // Tests that HTLCs in the holding cell are timed out after the requisite number of blocks.
4228         let chanmon_cfgs = create_chanmon_cfgs(3);
4229         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
4230         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
4231         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
4232         create_announced_chan_between_nodes(&nodes, 0, 1);
4233         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4234
4235         // Make sure all nodes are at the same starting height
4236         connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
4237         connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
4238         connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
4239
4240         // Route a first payment to get the 1 -> 2 channel in awaiting_raa...
4241         let (route, first_payment_hash, _, first_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
4242         nodes[1].node.send_payment_with_route(&route, first_payment_hash,
4243                 RecipientOnionFields::secret_only(first_payment_secret), PaymentId(first_payment_hash.0)).unwrap();
4244         assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
4245         check_added_monitors!(nodes[1], 1);
4246
4247         // Now attempt to route a second payment, which should be placed in the holding cell
4248         let sending_node = if forwarded_htlc { &nodes[0] } else { &nodes[1] };
4249         let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(sending_node, nodes[2], 100000);
4250         sending_node.node.send_payment_with_route(&route, second_payment_hash,
4251                 RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap();
4252         if forwarded_htlc {
4253                 check_added_monitors!(nodes[0], 1);
4254                 let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
4255                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
4256                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
4257                 expect_pending_htlcs_forwardable!(nodes[1]);
4258         }
4259         check_added_monitors!(nodes[1], 0);
4260
4261         connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS);
4262         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4263         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
4264         connect_blocks(&nodes[1], 1);
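             // That one extra block crosses the timeout threshold, so nodes[1] now gives up on the HTLC
             // sitting in its holding cell: failing it back to nodes[0] if it was a forward, or failing the
             // locally-originated payment otherwise.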
4265
4266         if forwarded_htlc {
4267                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
4268                 check_added_monitors!(nodes[1], 1);
4269                 let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
4270                 assert_eq!(fail_commit.len(), 1);
4271                 match fail_commit[0] {
4272                         MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, ref commitment_signed, .. }, .. } => {
4273                                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
4274                                 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, true, true);
4275                         },
4276                         _ => unreachable!(),
4277                 }
4278                 expect_payment_failed_with_update!(nodes[0], second_payment_hash, false, chan_2.0.contents.short_channel_id, false);
4279         } else {
4280                 expect_payment_failed!(nodes[1], second_payment_hash, false);
4281         }
4282 }
4283
4284 #[test]
4285 fn test_holding_cell_htlc_add_timeouts() {
4286         do_test_holding_cell_htlc_add_timeouts(false);
4287         do_test_holding_cell_htlc_add_timeouts(true);
4288 }
4289
4290 macro_rules! check_spendable_outputs {
4291         ($node: expr, $keysinterface: expr) => {
4292                 {
4293                         let mut events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events();
4294                         let mut txn = Vec::new();
4295                         let mut all_outputs = Vec::new();
4296                         let secp_ctx = Secp256k1::new();
4297                         for event in events.drain(..) {
4298                                 match event {
4299                                         Event::SpendableOutputs { mut outputs } => {
4300                                                 for outp in outputs.drain(..) {
4301                                                         txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx).unwrap());
4302                                                         all_outputs.push(outp);
4303                                                 }
4304                                         },
4305                                         _ => panic!("Unexpected event"),
4306                                 };
4307                         }
4308                         if all_outputs.len() > 1 {
4309                                 if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs(&all_outputs.iter().map(|a| a).collect::<Vec<_>>(), Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx) {
4310                                         txn.push(tx);
4311                                 }
4312                         }
4313                         txn
4314                 }
4315         }
4316 }
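// The tests below invoke this as `check_spendable_outputs!(nodes[i], node_cfgs[i].keys_manager)`: it
// drains any SpendableOutputs events from the chain monitor and has the keys manager build sweep
// transactions paying a dummy OP_RETURN script (at a feerate of 253 sat per 1000 weight), one per
// output plus, when more than one output exists, a single transaction aggregating them all.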
4317
4318 #[test]
4319 fn test_claim_sizeable_push_msat() {
4320         // Incidentally test SpendableOutput event generation due to detection of to_local output on commitment tx
4321         let chanmon_cfgs = create_chanmon_cfgs(2);
4322         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4323         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4324         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4325
4326         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
4327         nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
4328         check_closed_broadcast!(nodes[1], true);
4329         check_added_monitors!(nodes[1], 1);
4330         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
4331         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4332         assert_eq!(node_txn.len(), 1);
4333         check_spends!(node_txn[0], chan.3);
4334         assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output as channel_reserve_satoshis blocks us from doing so at channel opening
4335
4336         mine_transaction(&nodes[1], &node_txn[0]);
4337         connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
4338
4339         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4340         assert_eq!(spend_txn.len(), 1);
4341         assert_eq!(spend_txn[0].input.len(), 1);
4342         check_spends!(spend_txn[0], node_txn[0]);
4343         assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
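             // The holder's to_local output is encumbered by an OP_CSV delay of the to_self_delay the
             // counterparty required (BREAKDOWN_TIMEOUT in these tests), which is why the sweep sets that
             // nSequence and why BREAKDOWN_TIMEOUT - 1 further blocks had to be connected above before the
             // output became spendable.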
4344 }
4345
4346 #[test]
4347 fn test_claim_on_remote_sizeable_push_msat() {
4348         // Same test as the previous one, just on the remote commitment tx, as per_commitment_point registration changes depending on whether you're the funder or fundee, and
4349         // the to_remote output is encumbered by a P2WPKH
4350         let chanmon_cfgs = create_chanmon_cfgs(2);
4351         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4352         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4353         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4354
4355         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
4356         nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
4357         check_closed_broadcast!(nodes[0], true);
4358         check_added_monitors!(nodes[0], 1);
4359         check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
4360
4361         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4362         assert_eq!(node_txn.len(), 1);
4363         check_spends!(node_txn[0], chan.3);
4364         assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output as channel_reserve_satoshis blocks us from doing so at channel opening
4365
4366         mine_transaction(&nodes[1], &node_txn[0]);
4367         check_closed_broadcast!(nodes[1], true);
4368         check_added_monitors!(nodes[1], 1);
4369         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
4370         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
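             // The ChannelMonitor only surfaces SpendableOutputs once the transaction creating the output
             // has ANTI_REORG_DELAY confirmations (the block that mined it counts as the first), hence the
             // ANTI_REORG_DELAY - 1 additional blocks.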
4371
4372         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4373         assert_eq!(spend_txn.len(), 1);
4374         check_spends!(spend_txn[0], node_txn[0]);
4375 }
4376
4377 #[test]
4378 fn test_claim_on_remote_revoked_sizeable_push_msat() {
4379         // Same test as the previous one, just on the remote revoked commitment tx, as per_commitment_point registration changes depending on whether you're the funder or fundee, and
4380         // the to_remote output is encumbered by a P2WPKH
4381
4382         let chanmon_cfgs = create_chanmon_cfgs(2);
4383         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4384         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4385         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4386
4387         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000);
4388         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4389         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2);
4390         assert_eq!(revoked_local_txn[0].input.len(), 1);
4391         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
4392
4393         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4394         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4395         check_closed_broadcast!(nodes[1], true);
4396         check_added_monitors!(nodes[1], 1);
4397         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
4398
4399         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
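             // node_txn[0] here is nodes[1]'s penalty (justice) transaction claiming the revocable outputs
             // of the revoked commitment; mine it so its output can be swept below as well.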
4400         mine_transaction(&nodes[1], &node_txn[0]);
4401         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4402
4403         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4404         assert_eq!(spend_txn.len(), 3);
4405         check_spends!(spend_txn[0], revoked_local_txn[0]); // to_remote output on revoked remote commitment_tx
4406         check_spends!(spend_txn[1], node_txn[0]);
4407         check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[0]); // Both outputs
4408 }
4409
4410 #[test]
4411 fn test_static_spendable_outputs_preimage_tx() {
4412         let chanmon_cfgs = create_chanmon_cfgs(2);
4413         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4414         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4415         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4416
4417         // Create some initial channels
4418         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4419
4420         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
4421
4422         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4423         assert_eq!(commitment_tx[0].input.len(), 1);
4424         assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
4425
4426         // Settle A's commitment tx on B's chain
4427         nodes[1].node.claim_funds(payment_preimage);
4428         expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
4429         check_added_monitors!(nodes[1], 1);
4430         mine_transaction(&nodes[1], &commitment_tx[0]);
4431         check_added_monitors!(nodes[1], 1);
4432         let events = nodes[1].node.get_and_clear_pending_msg_events();
4433         match events[0] {
4434                 MessageSendEvent::UpdateHTLCs { .. } => {},
4435                 _ => panic!("Unexpected event"),
4436         }
4437         match events[1] {
4438                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4439                 _ => panic!("Unexpected event"),
4440         }
4441
4442         // Check B's monitor was able to send back output descriptor event for preimage tx on A's commitment tx
4443         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: preimage tx
4444         assert_eq!(node_txn.len(), 1);
4445         check_spends!(node_txn[0], commitment_tx[0]);
4446         assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
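             // The final witness element is the full HTLC witness script, so checking its length against
             // OFFERED_HTLC_SCRIPT_WEIGHT confirms this spends the offered-HTLC output on A's commitment
             // (here, B claiming it with the preimage).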
4447
4448         mine_transaction(&nodes[1], &node_txn[0]);
4449         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
4450         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4451
4452         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4453         assert_eq!(spend_txn.len(), 1);
4454         check_spends!(spend_txn[0], node_txn[0]);
4455 }
4456
4457 #[test]
4458 fn test_static_spendable_outputs_timeout_tx() {
4459         let chanmon_cfgs = create_chanmon_cfgs(2);
4460         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4461         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4462         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4463
4464         // Create some initial channels
4465         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4466
4467         // Rebalance the network a bit by relaying one payment through all the channels ...
4468         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
4469
4470         let (_, our_payment_hash, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000);
4471
4472         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4473         assert_eq!(commitment_tx[0].input.len(), 1);
4474         assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
4475
4476         // Settle A's commitment tx on B's chain
4477         mine_transaction(&nodes[1], &commitment_tx[0]);
4478         check_added_monitors!(nodes[1], 1);
4479         let events = nodes[1].node.get_and_clear_pending_msg_events();
4480         match events[0] {
4481                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4482                 _ => panic!("Unexpected event"),
4483         }
4484         connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
4485
4486         // Check B's monitor was able to send back output descriptor event for timeout tx on A's commitment tx
4487         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4488         assert_eq!(node_txn.len(), 1); // ChannelMonitor: timeout tx
4489         check_spends!(node_txn[0],  commitment_tx[0].clone());
4490         assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4491
4492         mine_transaction(&nodes[1], &node_txn[0]);
4493         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
4494         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4495         expect_payment_failed!(nodes[1], our_payment_hash, false);
4496
4497         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4498         assert_eq!(spend_txn.len(), 3); // SpendableOutput: remote_commitment_tx.to_remote, timeout_tx.output, plus one tx spending both
4499         check_spends!(spend_txn[0], commitment_tx[0]);
4500         check_spends!(spend_txn[1], node_txn[0]);
4501         check_spends!(spend_txn[2], node_txn[0], commitment_tx[0]); // All outputs
4502 }
4503
4504 #[test]
4505 fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
4506         let chanmon_cfgs = create_chanmon_cfgs(2);
4507         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4508         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4509         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4510
4511         // Create some initial channels
4512         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4513
4514         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4515         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
4516         assert_eq!(revoked_local_txn[0].input.len(), 1);
4517         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4518
4519         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4520
4521         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4522         check_closed_broadcast!(nodes[1], true);
4523         check_added_monitors!(nodes[1], 1);
4524         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
4525
4526         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4527         assert_eq!(node_txn.len(), 1);
4528         assert_eq!(node_txn[0].input.len(), 2);
4529         check_spends!(node_txn[0], revoked_local_txn[0]);
4530
4531         mine_transaction(&nodes[1], &node_txn[0]);
4532         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4533
4534         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4535         assert_eq!(spend_txn.len(), 1);
4536         check_spends!(spend_txn[0], node_txn[0]);
4537 }
4538
4539 #[test]
4540 fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
4541         let mut chanmon_cfgs = create_chanmon_cfgs(2);
4542         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
4543         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4544         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4545         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4546
4547         // Create some initial channels
4548         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4549
4550         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4551         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
4552         assert_eq!(revoked_local_txn[0].input.len(), 1);
4553         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4554
4555         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4556
4557         // A will generate HTLC-Timeout from revoked commitment tx
4558         mine_transaction(&nodes[0], &revoked_local_txn[0]);
4559         check_closed_broadcast!(nodes[0], true);
4560         check_added_monitors!(nodes[0], 1);
4561         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
4562         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
4563
4564         let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4565         assert_eq!(revoked_htlc_txn.len(), 1);
4566         assert_eq!(revoked_htlc_txn[0].input.len(), 1);
4567         assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
4568         check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
4569         assert_ne!(revoked_htlc_txn[0].lock_time.0, 0); // HTLC-Timeout
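         // A non-zero locktime (set to the HTLC's cltv_expiry) marks this as an HTLC-Timeout;
         // HTLC-Success transactions always use a locktime of 0.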
4570
4571         // B will generate justice tx from A's revoked commitment/HTLC tx
4572         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
4573         check_closed_broadcast!(nodes[1], true);
4574         check_added_monitors!(nodes[1], 1);
4575         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
4576
4577         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4578         assert_eq!(node_txn.len(), 2); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs
4579         // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
4580         // including the one already spent by revoked_htlc_txn[0]. That's OK, we'll spend with valid
4581         // transactions next...
4582         assert_eq!(node_txn[0].input.len(), 3);
4583         check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);
4584
4585         assert_eq!(node_txn[1].input.len(), 2);
4586         check_spends!(node_txn[1], revoked_local_txn[0], revoked_htlc_txn[0]);
4587         if node_txn[1].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
4588                 assert_ne!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4589         } else {
4590                 assert_eq!(node_txn[1].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
4591                 assert_ne!(node_txn[1].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4592         }
4593
4594         mine_transaction(&nodes[1], &node_txn[1]);
4595         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4596
4597         // Check B's ChannelMonitor was able to generate the right spendable output descriptor
4598         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4599         assert_eq!(spend_txn.len(), 1);
4600         assert_eq!(spend_txn[0].input.len(), 1);
4601         check_spends!(spend_txn[0], node_txn[1]);
4602 }
4603
4604 #[test]
4605 fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
4606         let mut chanmon_cfgs = create_chanmon_cfgs(2);
4607         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
4608         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4609         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4610         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4611
4612         // Create some initial channels
4613         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4614
4615         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4616         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
4617         assert_eq!(revoked_local_txn[0].input.len(), 1);
4618         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4619
4620         // The to-be-revoked commitment tx should have one HTLC and one to_remote output
4621         assert_eq!(revoked_local_txn[0].output.len(), 2);
4622
4623         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4624
4625         // B will generate HTLC-Success from revoked commitment tx
4626         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4627         check_closed_broadcast!(nodes[1], true);
4628         check_added_monitors!(nodes[1], 1);
4629         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
4630         let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4631
4632         assert_eq!(revoked_htlc_txn.len(), 1);
4633         assert_eq!(revoked_htlc_txn[0].input.len(), 1);
4634         assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4635         check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
4636
4637         // Check that the unspent (of two) outputs on revoked_local_txn[0] is a P2WPKH:
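         // (XOR-ing the vout with 1 picks the sibling output, since this commitment tx has exactly
         // two outputs.)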
4638         let unspent_local_txn_output = revoked_htlc_txn[0].input[0].previous_output.vout as usize ^ 1;
4639         assert_eq!(revoked_local_txn[0].output[unspent_local_txn_output].script_pubkey.len(), 2 + 20); // P2WPKH
4640
4641         // A will generate justice tx from B's revoked commitment/HTLC tx
4642         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
4643         check_closed_broadcast!(nodes[0], true);
4644         check_added_monitors!(nodes[0], 1);
4645         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
4646
4647         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4648         assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success
4649
4650         // The first transaction generated is bogus - it double-spends the HTLC output of
4651         // revoked_local_txn[0] (already spent by the confirmed revoked_htlc_txn[0]) along with
4652         // revoked_htlc_txn[0]'s own output. That's OK, we'll spend with valid transactions next...
4653         assert_eq!(node_txn[0].input.len(), 2);
4654         check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);
4655         if node_txn[0].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
4656                 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4657         } else {
4658                 assert_eq!(node_txn[0].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
4659                 assert_eq!(node_txn[0].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4660         }
4661
4662         assert_eq!(node_txn[1].input.len(), 1);
4663         check_spends!(node_txn[1], revoked_htlc_txn[0]);
4664
4665         mine_transaction(&nodes[0], &node_txn[1]);
4666         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
4667
4668         // Note that nodes[0]'s tx_broadcaster is still locked, so if we get here the channelmonitor
4669         // didn't try to generate any new transactions.
4670
4671         // Check A's ChannelMonitor was able to generate the right spendable output descriptor
4672         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
4673         assert_eq!(spend_txn.len(), 3);
4674         assert_eq!(spend_txn[0].input.len(), 1);
4675         check_spends!(spend_txn[0], revoked_local_txn[0]); // spending to_remote output from revoked local tx
4676         assert_ne!(spend_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4677         check_spends!(spend_txn[1], node_txn[1]); // spending justice tx output on the htlc success tx
4678         check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[1]); // Both outputs
4679 }
4680
4681 #[test]
4682 fn test_onchain_to_onchain_claim() {
4683         // Test that in case of channel closure, we detect the state of the output and claim the
4684         // HTLC on the downstream peer's remote commitment tx.
4685         // First, have C claim an HTLC against its own latest commitment transaction.
4686         // Then, broadcast these to B, which should update the monitor downstream on the A<->B
4687         // channel.
4688         // Finally, check that B will claim the HTLC output if A's latest commitment transaction
4689         // gets broadcast.
4690
4691         let chanmon_cfgs = create_chanmon_cfgs(3);
4692         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
4693         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
4694         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
4695
4696         // Create some initial channels
4697         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4698         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4699
4700         // Ensure all nodes are at the same height
4701         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
4702         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
4703         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
4704         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
4705
4706         // Rebalance the network a bit by relaying one payment through all the channels ...
4707         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
4708         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
4709
4710         let (payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
4711         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
4712         check_spends!(commitment_tx[0], chan_2.3);
4713         nodes[2].node.claim_funds(payment_preimage);
4714         expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
4715         check_added_monitors!(nodes[2], 1);
4716         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
4717         assert!(updates.update_add_htlcs.is_empty());
4718         assert!(updates.update_fail_htlcs.is_empty());
4719         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
4720         assert!(updates.update_fail_malformed_htlcs.is_empty());
4721
4722         mine_transaction(&nodes[2], &commitment_tx[0]);
4723         check_closed_broadcast!(nodes[2], true);
4724         check_added_monitors!(nodes[2], 1);
4725         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
4726
4727         let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx)
4728         assert_eq!(c_txn.len(), 1);
4729         check_spends!(c_txn[0], commitment_tx[0]);
4730         assert_eq!(c_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4731         assert!(c_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
4732         assert_eq!(c_txn[0].lock_time.0, 0); // Success tx
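         // C claimed against its own commitment tx, so the HTLC-Success output is the to_self-delayed
         // (revokeable) P2WSH above; contrast with b_txn[0] below, which claims against a remote
         // commitment and pays B directly via P2WPKH.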
4733
4734         // Now that C's commitment tx and HTLC-Success have been broadcast on B's chain, B should be able to extract the preimage and update the downstream monitor
4735         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), c_txn[0].clone()]));
4736         check_added_monitors!(nodes[1], 1);
4737         let events = nodes[1].node.get_and_clear_pending_events();
4738         assert_eq!(events.len(), 2);
4739         match events[0] {
4740                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
4741                 _ => panic!("Unexpected event"),
4742         }
4743         match events[1] {
4744                 Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
4745                         assert_eq!(fee_earned_msat, Some(1000));
4746                         assert_eq!(prev_channel_id, Some(chan_1.2));
4747                         assert_eq!(claim_from_onchain_tx, true);
4748                         assert_eq!(next_channel_id, Some(chan_2.2));
4749                         assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
4750                 },
4751                 _ => panic!("Unexpected event"),
4752         }
4753         check_added_monitors!(nodes[1], 1);
4754         let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events();
4755         assert_eq!(msg_events.len(), 3);
4756         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut msg_events);
4757         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut msg_events);
4758
4759         match nodes_2_event {
4760                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
4761                 _ => panic!("Unexpected event"),
4762         }
4763
4764         match nodes_0_event {
4765                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
4766                         assert!(update_add_htlcs.is_empty());
4767                         assert!(update_fail_htlcs.is_empty());
4768                         assert_eq!(update_fulfill_htlcs.len(), 1);
4769                         assert!(update_fail_malformed_htlcs.is_empty());
4770                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
4771                 },
4772                 _ => panic!("Unexpected event"),
4773         };
4774
4775         // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
4776         match msg_events[0] {
4777                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4778                 _ => panic!("Unexpected event"),
4779         }
4780
4781         // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
4782         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4783         mine_transaction(&nodes[1], &commitment_tx[0]);
4784         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
4785         let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4786         // ChannelMonitor: HTLC-Success tx
4787         assert_eq!(b_txn.len(), 1);
4788         check_spends!(b_txn[0], commitment_tx[0]);
4789         assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
4790         assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
4791         assert_eq!(b_txn[0].lock_time.0, nodes[1].best_block_info().1); // Success tx
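         // Claims our monitor builds against a remote commitment set nLockTime to the current best
         // height (commonly done to discourage fee sniping), unlike holder HTLC-Success txs which
         // use a locktime of 0.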
4792
4793         check_closed_broadcast!(nodes[1], true);
4794         check_added_monitors!(nodes[1], 1);
4795 }
4796
4797 #[test]
4798 fn test_duplicate_payment_hash_one_failure_one_success() {
4799         // Topology : A --> B --> C --> D
4800         // We route 2 payments with the same hash between B and C, one will time out and the other will be successfully claimed
4801         // Note that because C will refuse to generate two payment secrets for the same payment hash,
4802         // we forward one of the payments onwards to D.
4803         let chanmon_cfgs = create_chanmon_cfgs(4);
4804         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
4805         // When this test was written, the default base fee floated based on the HTLC count.
4806         // It is now fixed, so we simply set the fee to the expected value here.
4807         let mut config = test_default_channel_config();
4808         config.channel_config.forwarding_fee_base_msat = 196;
4809         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs,
4810                 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
4811         let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
4812
4813         create_announced_chan_between_nodes(&nodes, 0, 1);
4814         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4815         create_announced_chan_between_nodes(&nodes, 2, 3);
4816
4817         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
4818         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
4819         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
4820         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
4821         connect_blocks(&nodes[3], node_max_height - nodes[3].best_block_info().1);
4822
4823         let (our_payment_preimage, duplicate_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000);
4824
4825         let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, None).unwrap();
4826         // We reduce the final CLTV here by a somewhat arbitrary constant to keep it under the one-byte
4827         // script push size limit so that the below script length checks match
4828         // ACCEPTED_HTLC_SCRIPT_WEIGHT.
4829         let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV - 40)
4830                 .with_bolt11_features(nodes[3].node.invoice_features()).unwrap();
4831         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, 800_000);
4832         send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[2], &nodes[3]]], 800_000, duplicate_payment_hash, payment_secret);
4833
4834         let commitment_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
4835         assert_eq!(commitment_txn[0].input.len(), 1);
4836         check_spends!(commitment_txn[0], chan_2.3);
4837
4838         mine_transaction(&nodes[1], &commitment_txn[0]);
4839         check_closed_broadcast!(nodes[1], true);
4840         check_added_monitors!(nodes[1], 1);
4841         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
4842         connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
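         // (The forwarded HTLC expires one MIN_CLTV_EXPIRY_DELTA after its TEST_FINAL_CLTV - 40
         // final CLTV, which is also past the received HTLC's TEST_FINAL_CLTV expiry, so both HTLCs
         // are now expired.)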
4843
4844         let htlc_timeout_tx;
4845         { // Extract one of the two HTLC-Timeout transactions
4846                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
4847                 // ChannelMonitor: timeout tx * 2-or-3
4848                 assert!(node_txn.len() == 2 || node_txn.len() == 3);
4849
4850                 check_spends!(node_txn[0], commitment_txn[0]);
4851                 assert_eq!(node_txn[0].input.len(), 1);
4852                 assert_eq!(node_txn[0].output.len(), 1);
4853
4854                 if node_txn.len() > 2 {
4855                         check_spends!(node_txn[1], commitment_txn[0]);
4856                         assert_eq!(node_txn[1].input.len(), 1);
4857                         assert_eq!(node_txn[1].output.len(), 1);
4858                         assert_eq!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
4859
4860                         check_spends!(node_txn[2], commitment_txn[0]);
4861                         assert_eq!(node_txn[2].input.len(), 1);
4862                         assert_eq!(node_txn[2].output.len(), 1);
4863                         assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
4864                 } else {
4865                         check_spends!(node_txn[1], commitment_txn[0]);
4866                         assert_eq!(node_txn[1].input.len(), 1);
4867                         assert_eq!(node_txn[1].output.len(), 1);
4868                         assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
4869                 }
4870
4871                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4872                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4873                 // Assign htlc_timeout_tx to the forwarded HTLC (with value ~800 sats). The received HTLC
4874                 // (with value 900 sats) will be claimed in the below `claim_funds` call.
4875                 if node_txn.len() > 2 {
4876                         assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4877                         htlc_timeout_tx = if node_txn[2].output[0].value < 900 { node_txn[2].clone() } else { node_txn[0].clone() };
4878                 } else {
4879                         htlc_timeout_tx = if node_txn[0].output[0].value < 900 { node_txn[1].clone() } else { node_txn[0].clone() };
4880                 }
4881         }
4882
4883         nodes[2].node.claim_funds(our_payment_preimage);
4884         expect_payment_claimed!(nodes[2], duplicate_payment_hash, 900_000);
4885
4886         mine_transaction(&nodes[2], &commitment_txn[0]);
4887         check_added_monitors!(nodes[2], 2);
4888         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
4889         let events = nodes[2].node.get_and_clear_pending_msg_events();
4890         match events[0] {
4891                 MessageSendEvent::UpdateHTLCs { .. } => {},
4892                 _ => panic!("Unexpected event"),
4893         }
4894         match events[1] {
4895                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4896                 _ => panic!("Unexpected event"),
4897         }
4898         let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4899         assert_eq!(htlc_success_txn.len(), 2); // ChannelMonitor: HTLC-Success txn (*2 due to 2-HTLC outputs)
4900         check_spends!(htlc_success_txn[0], commitment_txn[0]);
4901         check_spends!(htlc_success_txn[1], commitment_txn[0]);
4902         assert_eq!(htlc_success_txn[0].input.len(), 1);
4903         assert_eq!(htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4904         assert_eq!(htlc_success_txn[1].input.len(), 1);
4905         assert_eq!(htlc_success_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4906         assert_ne!(htlc_success_txn[0].input[0].previous_output, htlc_success_txn[1].input[0].previous_output);
4907         assert_ne!(htlc_success_txn[1].input[0].previous_output, htlc_timeout_tx.input[0].previous_output);
4908
4909         mine_transaction(&nodes[1], &htlc_timeout_tx);
4910         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4911         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
4912         let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4913         assert!(htlc_updates.update_add_htlcs.is_empty());
4914         assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
4915         let first_htlc_id = htlc_updates.update_fail_htlcs[0].htlc_id;
4916         assert!(htlc_updates.update_fulfill_htlcs.is_empty());
4917         assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
4918         check_added_monitors!(nodes[1], 1);
4919
4920         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
4921         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4922         {
4923                 commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true);
4924         }
4925         expect_payment_failed_with_update!(nodes[0], duplicate_payment_hash, false, chan_2.0.contents.short_channel_id, true);
4926
4927         // Resolve the 2nd HTLC by broadcasting C's HTLC-Success tx on B's chain
4928         mine_transaction(&nodes[1], &htlc_success_txn[1]);
4929         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(196), true, true);
4930         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4931         assert!(updates.update_add_htlcs.is_empty());
4932         assert!(updates.update_fail_htlcs.is_empty());
4933         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
4934         assert_ne!(updates.update_fulfill_htlcs[0].htlc_id, first_htlc_id);
4935         assert!(updates.update_fail_malformed_htlcs.is_empty());
4936         check_added_monitors!(nodes[1], 1);
4937
4938         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
4939         commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false);
4940         expect_payment_sent(&nodes[0], our_payment_preimage, None, true);
4941 }
4942
4943 #[test]
4944 fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
4945         let chanmon_cfgs = create_chanmon_cfgs(2);
4946         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4947         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4948         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4949
4950         // Create some initial channels
4951         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4952
4953         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
4954         let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
4955         assert_eq!(local_txn.len(), 1);
4956         assert_eq!(local_txn[0].input.len(), 1);
4957         check_spends!(local_txn[0], chan_1.3);
4958
4959         // Give B knowledge of the preimage so it can generate a local HTLC-Success tx
4960         nodes[1].node.claim_funds(payment_preimage);
4961         expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
4962         check_added_monitors!(nodes[1], 1);
4963
4964         mine_transaction(&nodes[1], &local_txn[0]);
4965         check_added_monitors!(nodes[1], 1);
4966         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
4967         let events = nodes[1].node.get_and_clear_pending_msg_events();
4968         match events[0] {
4969                 MessageSendEvent::UpdateHTLCs { .. } => {},
4970                 _ => panic!("Unexpected event"),
4971         }
4972         match events[1] {
4973                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4974                 _ => panic!("Unexpected event"),
4975         }
4976         let node_tx = {
4977                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
4978                 assert_eq!(node_txn.len(), 1);
4979                 assert_eq!(node_txn[0].input.len(), 1);
4980                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4981                 check_spends!(node_txn[0], local_txn[0]);
4982                 node_txn[0].clone()
4983         };
4984
4985         mine_transaction(&nodes[1], &node_tx);
4986         connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
4987
4988         // Verify that B is able to spend its own HTLC-Success tx thanks to the spendable output event given back by its ChannelMonitor
4989         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4990         assert_eq!(spend_txn.len(), 1);
4991         assert_eq!(spend_txn[0].input.len(), 1);
4992         check_spends!(spend_txn[0], node_tx);
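         // The HTLC-Success output is encumbered by an OP_CSV of to_self_delay (BREAKDOWN_TIMEOUT
         // in these tests), which is why the spend uses that relative-locktime sequence.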
4993         assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
4994 }
4995
4996 fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, announce_latest: bool) {
4997         // Test that we fail backwards the full set of HTLCs we need to when remote broadcasts an
4998         // unrevoked commitment transaction.
4999         // This includes HTLCs which were below the dust threshold as well as HTLCs which were awaiting
5000         // a remote RAA before they could be failed backwards (and combinations thereof).
5001         // We also test duplicate-hash HTLCs by adding two nodes on each side of the target nodes which
5002         // use the same payment hashes.
5003         // Thus, we use a six-node network:
5004         //
5005         // A \         / E
5006         //    - C - D -
5007         // B /         \ F
5008         // And test where C fails back to A/B when D announces its latest commitment transaction
5009         let chanmon_cfgs = create_chanmon_cfgs(6);
5010         let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
5011         // When this test was written, the default base fee floated based on the HTLC count.
5012         // It is now fixed, so we simply set the fee to the expected value here.
5013         let mut config = test_default_channel_config();
5014         config.channel_config.forwarding_fee_base_msat = 196;
5015         let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs,
5016                 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
5017         let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
5018
5019         let _chan_0_2 = create_announced_chan_between_nodes(&nodes, 0, 2);
5020         let _chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
5021         let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
5022         let chan_3_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
5023         let chan_3_5 = create_announced_chan_between_nodes(&nodes, 3, 5);
5024
5025         // Rebalance and check output sanity...
5026         send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
5027         send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000);
5028         assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
5029
5030         let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
5031                 .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context.holder_dust_limit_satoshis;
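         // HTLCs worth no more than the dust limit (plus the HTLC-transaction fee) get no output on
         // the commitment transaction, so they can only be failed back, never claimed on-chain.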
5032         // 0th HTLC:
5033         let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5034         // 1st HTLC:
5035         let (_, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5036         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
5037         // 2nd HTLC:
5038         send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5039         // 3rd HTLC:
5040         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5041         // 4th HTLC:
5042         let (_, payment_hash_3, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5043         // 5th HTLC:
5044         let (_, payment_hash_4, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5045         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
5046         // 6th HTLC:
5047         send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200, None).unwrap());
5048         // 7th HTLC:
5049         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200, None).unwrap());
5050
5051         // 8th HTLC:
5052         let (_, payment_hash_5, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5053         // 9th HTLC:
5054         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
5055         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5056
5057         // 10th HTLC:
5058         let (_, payment_hash_6, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5059         // 11th HTLC:
5060         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
5061         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200, None).unwrap());
5062
5063         // Double-check that six of the new HTLCs were added
5064         // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie,
5065         // with to_local and to_remote outputs, 8 outputs and 6 HTLCs not included).
5066         assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2).len(), 1);
5067         assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 8);
5068
5069         // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
5070         // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
5071         nodes[4].node.fail_htlc_backwards(&payment_hash_1);
5072         nodes[4].node.fail_htlc_backwards(&payment_hash_3);
5073         nodes[4].node.fail_htlc_backwards(&payment_hash_5);
5074         nodes[4].node.fail_htlc_backwards(&payment_hash_6);
5075         check_added_monitors!(nodes[4], 0);
5076
5077         let failed_destinations = vec![
5078                 HTLCDestination::FailedPayment { payment_hash: payment_hash_1 },
5079                 HTLCDestination::FailedPayment { payment_hash: payment_hash_3 },
5080                 HTLCDestination::FailedPayment { payment_hash: payment_hash_5 },
5081                 HTLCDestination::FailedPayment { payment_hash: payment_hash_6 },
5082         ];
5083         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations);
5084         check_added_monitors!(nodes[4], 1);
5085
5086         let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
5087         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[0]);
5088         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[1]);
5089         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[2]);
5090         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[3]);
5091         commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false);
5092
5093         // Fail 3rd below-dust and 7th above-dust HTLCs
5094         nodes[5].node.fail_htlc_backwards(&payment_hash_2);
5095         nodes[5].node.fail_htlc_backwards(&payment_hash_4);
5096         check_added_monitors!(nodes[5], 0);
5097
5098         let failed_destinations_2 = vec![
5099                 HTLCDestination::FailedPayment { payment_hash: payment_hash_2 },
5100                 HTLCDestination::FailedPayment { payment_hash: payment_hash_4 },
5101         ];
5102         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2);
5103         check_added_monitors!(nodes[5], 1);
5104
5105         let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
5106         nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[0]);
5107         nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]);
5108         commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
5109
5110         let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
5111
5112         // After the 4 and 2 removes above in nodes[4] and nodes[5] respectively, nodes[3] should receive 6 HTLCHandlingFailed events
5113         let failed_destinations_3 = vec![
5114                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5115                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5116                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5117                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5118                 HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
5119                 HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
5120         ];
5121         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3);
5122         check_added_monitors!(nodes[3], 1);
5123         let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
5124         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
5125         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[1]);
5126         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[2]);
5127         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[3]);
5128         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[4]);
5129         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[5]);
5130         if deliver_last_raa {
5131                 commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false);
5132         } else {
5133                 let _cs_last_raa = commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false, true, false, true);
5134         }
5135
5136         // D's latest commitment transaction now contains 1st + 2nd + 9th HTLCs (implicitly, they're
5137         // below the dust limit) and the 5th + 6th + 11th HTLCs. It has failed back the 0th, 3rd, 4th,
5138         // 7th, 8th, and 10th, but as we haven't yet delivered the final RAA to C, the fails haven't
5139         // propagated back to A/B yet (and D has two unrevoked commitment transactions).
5140         //
5141         // We now broadcast the latest commitment transaction, which *should* result in failures for
5142         // the 0th, 1st, 2nd, 3rd, 4th, 7th, 8th, 9th, and 10th HTLCs, ie all the below-dust HTLCs and
5143         // the non-broadcast above-dust HTLCs.
5144         //
5145         // Alternatively, we may broadcast the previous commitment transaction, which should only
5146         // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
5147         let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
5148
5149         if announce_latest {
5150                 mine_transaction(&nodes[2], &ds_last_commitment_tx[0]);
5151         } else {
5152                 mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
5153         }
5154         let events = nodes[2].node.get_and_clear_pending_events();
5155         let close_event = if deliver_last_raa {
5156                 assert_eq!(events.len(), 2 + 6);
5157                 events.last().clone().unwrap()
5158         } else {
5159                 assert_eq!(events.len(), 1);
5160                 events.last().clone().unwrap()
5161         };
5162         match close_event {
5163                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
5164                 _ => panic!("Unexpected event"),
5165         }
5166
5167         connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
5168         check_closed_broadcast!(nodes[2], true);
5169         if deliver_last_raa {
5170                 expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
5171
5172                 let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
5173                 expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
5174         } else {
5175                 let expected_destinations: Vec<HTLCDestination> = if announce_latest {
5176                         repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect()
5177                 } else {
5178                         repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect()
5179                 };
5180
5181                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations);
5182         }
5183         check_added_monitors!(nodes[2], 3);
5184
5185         let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
5186         assert_eq!(cs_msgs.len(), 2);
5187         let mut a_done = false;
5188         for msg in cs_msgs {
5189                 match msg {
5190                         MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
5191                                 // Both under-dust HTLCs and the one above-dust HTLC that we had already failed
5192                                 // should be failed-backwards here.
5193                                 let target = if *node_id == nodes[0].node.get_our_node_id() {
5194                                         // If announce_latest, expect 0th, 1st, 4th, 8th, 10th HTLCs, else only 0th, 1st, 10th below-dust HTLCs
5195                                         for htlc in &updates.update_fail_htlcs {
5196                                                 assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 6 || if announce_latest { htlc.htlc_id == 3 || htlc.htlc_id == 5 } else { false });
5197                                         }
5198                                         assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 5 } else { 3 });
5199                                         assert!(!a_done);
5200                                         a_done = true;
5201                                         &nodes[0]
5202                                 } else {
5203                                         // If announce_latest, expect 2nd, 3rd, 7th, 9th HTLCs, else only 2nd, 3rd, 9th below-dust HTLCs
5204                                         for htlc in &updates.update_fail_htlcs {
5205                                                 assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 5 || if announce_latest { htlc.htlc_id == 4 } else { false });
5206                                         }
5207                                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
5208                                         assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 4 } else { 3 });
5209                                         &nodes[1]
5210                                 };
5211                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
5212                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[1]);
5213                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[2]);
5214                                 if announce_latest {
5215                                         target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[3]);
5216                                         if *node_id == nodes[0].node.get_our_node_id() {
5217                                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[4]);
5218                                         }
5219                                 }
5220                                 commitment_signed_dance!(target, nodes[2], updates.commitment_signed, false, true);
5221                         },
5222                         _ => panic!("Unexpected event"),
5223                 }
5224         }
5225
5226         let as_events = nodes[0].node.get_and_clear_pending_events();
5227         assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 });
5228         let mut as_failds = HashSet::new();
5229         let mut as_updates = 0;
5230         for event in as_events.iter() {
5231                 if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
5232                         assert!(as_failds.insert(*payment_hash));
5233                         if *payment_hash != payment_hash_2 {
5234                                 assert_eq!(*payment_failed_permanently, deliver_last_raa);
5235                         } else {
5236                                 assert!(!payment_failed_permanently);
5237                         }
5238                         if let PathFailure::OnPath { network_update: Some(_) } = failure {
5239                                 as_updates += 1;
5240                         }
5241                 } else if let &Event::PaymentFailed { .. } = event {
5242                 } else { panic!("Unexpected event"); }
5243         }
5244         assert!(as_failds.contains(&payment_hash_1));
5245         assert!(as_failds.contains(&payment_hash_2));
5246         if announce_latest {
5247                 assert!(as_failds.contains(&payment_hash_3));
5248                 assert!(as_failds.contains(&payment_hash_5));
5249         }
5250         assert!(as_failds.contains(&payment_hash_6));
5251
5252         let bs_events = nodes[1].node.get_and_clear_pending_events();
5253         assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 });
5254         let mut bs_failds = HashSet::new();
5255         let mut bs_updates = 0;
5256         for event in bs_events.iter() {
5257                 if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
5258                         assert!(bs_failds.insert(*payment_hash));
5259                         if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
5260                                 assert_eq!(*payment_failed_permanently, deliver_last_raa);
5261                         } else {
5262                                 assert!(!payment_failed_permanently);
5263                         }
5264                         if let PathFailure::OnPath { network_update: Some(_) } = failure {
5265                                 bs_updates += 1;
5266                         }
5267                 } else if let &Event::PaymentFailed { .. } = event {
5268                 } else { panic!("Unexpected event"); }
5269         }
5270         assert!(bs_failds.contains(&payment_hash_1));
5271         assert!(bs_failds.contains(&payment_hash_2));
5272         if announce_latest {
5273                 assert!(bs_failds.contains(&payment_hash_4));
5274         }
5275         assert!(bs_failds.contains(&payment_hash_5));
5276
5277         // For each HTLC which was not failed-back by normal process (ie deliver_last_raa), we should
5278         // get a NetworkUpdate. A should have gotten 4 HTLCs which were failed-back due to
5279         // unknown-preimage-etc, B should have gotten 2. Thus, in the
5280         // announce_latest && deliver_last_raa case, we should have 5-4=1 and 4-2=2 NetworkUpdates.
5281         assert_eq!(as_updates, if deliver_last_raa { 1 } else if !announce_latest { 3 } else { 5 });
5282         assert_eq!(bs_updates, if deliver_last_raa { 2 } else if !announce_latest { 3 } else { 4 });
5283 }
5284
5285 #[test]
5286 fn test_fail_backwards_latest_remote_announce_a() {
5287         do_test_fail_backwards_unrevoked_remote_announce(false, true);
5288 }
5289
5290 #[test]
5291 fn test_fail_backwards_latest_remote_announce_b() {
5292         do_test_fail_backwards_unrevoked_remote_announce(true, true);
5293 }
5294
5295 #[test]
5296 fn test_fail_backwards_previous_remote_announce() {
5297         do_test_fail_backwards_unrevoked_remote_announce(false, false);
5298         // Note that true, true doesn't make sense as it implies we announce a revoked state, which is
5299         // tested for in test_commitment_revoked_fail_backward_exhaustive()
5300 }
5301
5302 #[test]
5303 fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
5304         let chanmon_cfgs = create_chanmon_cfgs(2);
5305         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5306         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5307         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5308
5309         // Create some initial channels
5310         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5311
5312         let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
5313         let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
5314         assert_eq!(local_txn[0].input.len(), 1);
5315         check_spends!(local_txn[0], chan_1.3);
5316
5317         // Time out the HTLC on A's chain so that A can generate an HTLC-Timeout tx
5318         mine_transaction(&nodes[0], &local_txn[0]);
5319         check_closed_broadcast!(nodes[0], true);
5320         check_added_monitors!(nodes[0], 1);
5321         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
5322         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
5323
5324         let htlc_timeout = {
5325                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5326                 assert_eq!(node_txn.len(), 1);
5327                 assert_eq!(node_txn[0].input.len(), 1);
5328                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5329                 check_spends!(node_txn[0], local_txn[0]);
5330                 node_txn[0].clone()
5331         };
5332
5333         mine_transaction(&nodes[0], &htlc_timeout);
5334         connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
5335         expect_payment_failed!(nodes[0], our_payment_hash, false);
5336
5337         // Verify that A is able to spend its own HTLC-Timeout tx thanks to the spendable output event given back by its ChannelMonitor
5338         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5339         assert_eq!(spend_txn.len(), 3);
5340         check_spends!(spend_txn[0], local_txn[0]);
5341         assert_eq!(spend_txn[1].input.len(), 1);
5342         check_spends!(spend_txn[1], htlc_timeout);
5343         assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5344         assert_eq!(spend_txn[2].input.len(), 2);
5345         check_spends!(spend_txn[2], local_txn[0], htlc_timeout);
5346         assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
5347                 spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
5348 }
5349
5350 #[test]
5351 fn test_key_derivation_params() {
5352         // This test is a copy of test_dynamic_spendable_outputs_local_htlc_timeout_tx, with a key
5353         // manager rotation to test that `channel_keys_id` returned in
5354         // [`SpendableOutputDescriptor::DelayedPaymentOutput`] lets us re-derive the channel key set to
5355         // then derive a `delayed_payment_key`.
5356
5357         let chanmon_cfgs = create_chanmon_cfgs(3);
5358
5359         // We manually create the node configuration to back up the seed.
5360         let seed = [42; 32];
5361         let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
5362         let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
5363         let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger));
5364         let scorer = Mutex::new(test_utils::TestScorer::new());
5365         let router = test_utils::TestRouter::new(network_graph.clone(), &scorer);
5366         let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
5367         let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5368         node_cfgs.remove(0);
5369         node_cfgs.insert(0, node);
5370
5371         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5372         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5373
5374         // Create some initial channels
5375         // Create a dummy channel to advance the channel index by one and thus test re-derivation
5376         // correctness for node 0.
5377         let chan_0 = create_announced_chan_between_nodes(&nodes, 0, 2);
5378         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5379         assert_ne!(chan_0.3.output[0].script_pubkey, chan_1.3.output[0].script_pubkey);
5380
5381         // Ensure all nodes are at the same height
5382         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
5383         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
5384         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
5385         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
5386
5387         let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
5388         let local_txn_0 = get_local_commitment_txn!(nodes[0], chan_0.2);
5389         let local_txn_1 = get_local_commitment_txn!(nodes[0], chan_1.2);
5390         assert_eq!(local_txn_1[0].input.len(), 1);
5391         check_spends!(local_txn_1[0], chan_1.3);
5392
5393         // Check that the funding pubkeys are unique
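             // (witness element 3 is the 2-of-2 funding redeemscript, OP_2 <33-byte pk> <33-byte pk>
             //  OP_2 OP_CHECKMULTISIG, so the two pubkeys sit at bytes [2..35] and [36..69])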
5394         let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][36..69]));
5395         let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][36..69]));
5396         if from_0_funding_key_0 == from_1_funding_key_0
5397             || from_0_funding_key_0 == from_1_funding_key_1
5398             || from_0_funding_key_1 == from_1_funding_key_0
5399             || from_0_funding_key_1 == from_1_funding_key_1 {
5400                 panic!("Funding pubkeys aren't unique");
5401         }
5402
5403         // Time out the HTLC on A's chain so that it can generate an HTLC-Timeout tx
5404         mine_transaction(&nodes[0], &local_txn_1[0]);
5405         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
5406         check_closed_broadcast!(nodes[0], true);
5407         check_added_monitors!(nodes[0], 1);
5408         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
5409
5410         let htlc_timeout = {
5411                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5412                 assert_eq!(node_txn.len(), 1);
5413                 assert_eq!(node_txn[0].input.len(), 1);
5414                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5415                 check_spends!(node_txn[0], local_txn_1[0]);
5416                 node_txn[0].clone()
5417         };
5418
5419         mine_transaction(&nodes[0], &htlc_timeout);
5420         connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
5421         expect_payment_failed!(nodes[0], our_payment_hash, false);
5422
5423         // Verify that A is able to spend its own HTLC-Timeout tx thanks to the SpendableOutputs event given back by its ChannelMonitor
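             // A brand-new KeysInterface built from the same seed must be able to re-derive, purely from
             // the descriptors' `channel_keys_id`, the keys needed to sweep these outputs.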
5424         let new_keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
5425         let spend_txn = check_spendable_outputs!(nodes[0], new_keys_manager);
5426         assert_eq!(spend_txn.len(), 3);
5427         check_spends!(spend_txn[0], local_txn_1[0]);
5428         assert_eq!(spend_txn[1].input.len(), 1);
5429         check_spends!(spend_txn[1], htlc_timeout);
5430         assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5431         assert_eq!(spend_txn[2].input.len(), 2);
5432         check_spends!(spend_txn[2], local_txn_1[0], htlc_timeout);
5433         assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
5434                 spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
5435 }
5436
5437 #[test]
5438 fn test_static_output_closing_tx() {
5439         let chanmon_cfgs = create_chanmon_cfgs(2);
5440         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5441         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5442         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5443
5444         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5445
5446         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
5447         let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
5448
5449         mine_transaction(&nodes[0], &closing_tx);
5450         check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
5451         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
5452
5453         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5454         assert_eq!(spend_txn.len(), 1);
5455         check_spends!(spend_txn[0], closing_tx);
5456
5457         mine_transaction(&nodes[1], &closing_tx);
5458         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
5459         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5460
5461         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5462         assert_eq!(spend_txn.len(), 1);
5463         check_spends!(spend_txn[0], closing_tx);
5464 }
5465
5466 fn do_htlc_claim_local_commitment_only(use_dust: bool) {
5467         let chanmon_cfgs = create_chanmon_cfgs(2);
5468         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5469         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5470         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5471         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5472
5473         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 });
5474
5475         // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being
5476         // present in B's local commitment transaction and in none of A's commitment transactions.
5477         nodes[1].node.claim_funds(payment_preimage);
5478         check_added_monitors!(nodes[1], 1);
5479         expect_payment_claimed!(nodes[1], payment_hash, if use_dust { 50000 } else { 3_000_000 });
5480
5481         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5482         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
5483         expect_payment_sent_without_paths!(nodes[0], payment_preimage);
5484
5485         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
5486         check_added_monitors!(nodes[0], 1);
5487         let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5488         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
5489         check_added_monitors!(nodes[1], 1);
5490
5491         let starting_block = nodes[1].best_block_info();
5492         let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
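             // Connect blocks until B is within CLTV_CLAIM_BUFFER of the HTLC's expiry, at which point it
             // should go on-chain to claim the HTLC (unless the HTLC is dust).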
5493         for _ in starting_block.1 + 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + starting_block.1 + 2 {
5494                 connect_block(&nodes[1], &block);
5495                 block.header.prev_blockhash = block.block_hash();
5496         }
5497         test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
5498         check_closed_broadcast!(nodes[1], true);
5499         check_added_monitors!(nodes[1], 1);
5500         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
5501 }
5502
5503 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
5504         let chanmon_cfgs = create_chanmon_cfgs(2);
5505         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5506         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5507         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5508         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5509
5510         let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if use_dust { 50000 } else { 3000000 });
5511         nodes[0].node.send_payment_with_route(&route, payment_hash,
5512                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
5513         check_added_monitors!(nodes[0], 1);
5514
5515         let _as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5516
5517         // As far as A is concerned, the HTLC is now present only in the latest remote commitment
5518         // transaction; however, it is not in A's latest local commitment, so we can just broadcast that
5519         // to "time out" the HTLC.
5520
5521         let starting_block = nodes[1].best_block_info();
5522         let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
5523
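             // Connect blocks well past the HTLC's expiry (plus LATENCY_GRACE_PERIOD_BLOCKS); A should then
             // force-close with its local commitment transaction, which does not contain the HTLC.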
5524         for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 {
5525                 connect_block(&nodes[0], &block);
5526                 block.header.prev_blockhash = block.block_hash();
5527         }
5528         test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
5529         check_closed_broadcast!(nodes[0], true);
5530         check_added_monitors!(nodes[0], 1);
5531         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
5532 }
5533
5534 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
5535         let chanmon_cfgs = create_chanmon_cfgs(3);
5536         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5537         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5538         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5539         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5540
5541         // Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present
5542         // in B's previous (unrevoked) commitment transaction and in none of A's commitment transactions.
5543         // Also optionally test that we *don't* fail the channel in case the commitment transaction was
5544         // actually revoked.
5545         let htlc_value = if use_dust { 50000 } else { 3000000 };
5546         let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
5547         nodes[1].node.fail_htlc_backwards(&our_payment_hash);
5548         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
5549         check_added_monitors!(nodes[1], 1);
5550
5551         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5552         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
5553         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
5554         check_added_monitors!(nodes[0], 1);
5555         let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5556         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
5557         check_added_monitors!(nodes[1], 1);
5558         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.1);
5559         check_added_monitors!(nodes[1], 1);
5560         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
5561
5562         if check_revoke_no_close {
5563                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
5564                 check_added_monitors!(nodes[0], 1);
5565         }
5566
5567         let starting_block = nodes[1].best_block_info();
5568         let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
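             // Connect blocks past the HTLC's expiry plus the grace period. If B's revoke_and_ack was never
             // delivered, the HTLC could still appear on-chain in B's unrevoked previous commitment, so A
             // must force-close; if it was delivered (check_revoke_no_close), the HTLC is simply failed.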
5569         for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 {
5570                 connect_block(&nodes[0], &block);
5571                 block.header.prev_blockhash = block.block_hash();
5572         }
5573         if !check_revoke_no_close {
5574                 test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
5575                 check_closed_broadcast!(nodes[0], true);
5576                 check_added_monitors!(nodes[0], 1);
5577                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
5578         } else {
5579                 expect_payment_failed!(nodes[0], our_payment_hash, true);
5580         }
5581 }
5582
5583 // Test that we close channels on-chain when broadcastable HTLCs reach their timeout window.
5584 // There are only a few cases to test here:
5585 //  * it's not really normative behavior, but we test that below-dust HTLCs "included" in
5586 //    broadcastable commitment transactions result in channel closure,
5587 //  * it's included in an unrevoked-but-previous remote commitment transaction,
5588 //  * it's included in the latest remote or local commitment transactions.
5589 // We test each of the three possible commitment transactions individually and use both dust and
5590 // non-dust HTLCs.
5591 // Note that we don't bother testing both outbound and inbound HTLC failures for each case, and we
5592 // assume they are handled the same across all six cases, as both outbound and inbound failures are
5593 // tested for at least one of the cases in other tests.
5594 #[test]
5595 fn htlc_claim_single_commitment_only_a() {
5596         do_htlc_claim_local_commitment_only(true);
5597         do_htlc_claim_local_commitment_only(false);
5598
5599         do_htlc_claim_current_remote_commitment_only(true);
5600         do_htlc_claim_current_remote_commitment_only(false);
5601 }
5602
5603 #[test]
5604 fn htlc_claim_single_commitment_only_b() {
5605         do_htlc_claim_previous_remote_commitment_only(true, false);
5606         do_htlc_claim_previous_remote_commitment_only(false, false);
5607         do_htlc_claim_previous_remote_commitment_only(true, true);
5608         do_htlc_claim_previous_remote_commitment_only(false, true);
5609 }
5610
5611 #[test]
5612 #[should_panic]
5613 fn bolt2_open_channel_sending_node_checks_part1() { // This test needs to be on its own as we are catching a panic
5614         let chanmon_cfgs = create_chanmon_cfgs(2);
5615         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5616         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5617         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5618         // Force duplicate randomness for every get-random call
5619         for node in nodes.iter() {
5620                 *node.keys_manager.override_random_bytes.lock().unwrap() = Some([0; 32]);
5621         }
5622
5623         // BOLT #2 spec: Sending node must ensure temporary_channel_id is unique from any other channel ID with the same peer.
5624         let channel_value_satoshis=10000;
5625         let push_msat=10001;
5626         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).unwrap();
5627         let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5628         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
5629         get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
5630
5631         // Create a second channel with the same random values. This used to panic due to a colliding
5632         // channel_id, but now panics due to a colliding outbound SCID alias.
5633         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err());
5634 }
5635
5636 #[test]
5637 fn bolt2_open_channel_sending_node_checks_part2() {
5638         let chanmon_cfgs = create_chanmon_cfgs(2);
5639         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5640         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5641         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5642
5643         // BOLT #2 spec: Sending node must set funding_satoshis to less than 2^24 satoshis
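             // NB: `2^24` is bitwise XOR in Rust (== 26), so this open is actually rejected for being far
             // below any sane channel size rather than for exceeding the 2^24 sat limit quoted above.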
5644         let channel_value_satoshis=2^24;
5645         let push_msat=10001;
5646         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err());
5647
5648         // BOLT #2 spec: Sending node must set push_msat to equal or less than 1000 * funding_satoshis
5649         let channel_value_satoshis=10000;
5650         // Test when push_msat is one more than 1000 * funding_satoshis (just over the limit).
5651         let push_msat=1000*channel_value_satoshis+1;
5652         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err());
5653
5654         // BOLT #2 spec: Sending node must set channel_reserve_satoshis greater than or equal to dust_limit_satoshis
5655         let channel_value_satoshis=10000;
5656         let push_msat=10001;
5657         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_ok()); //Create a valid channel
5658         let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5659         assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.dust_limit_satoshis);
5660
5661         // BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0
5662         // Only the least-significant bit of channel_flags is currently defined, so channel_flags can only take one of two values: 0 or 1
5663         assert!(node0_to_1_send_open_channel.channel_flags<=1);
5664
5665         // BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver.
5666         assert!(BREAKDOWN_TIMEOUT>0);
5667         assert!(node0_to_1_send_open_channel.to_self_delay==BREAKDOWN_TIMEOUT);
5668
5669         // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within.
5670         let chain_hash=genesis_block(Network::Testnet).header.block_hash();
5671         assert_eq!(node0_to_1_send_open_channel.chain_hash,chain_hash);
5672
5673         // BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys.
5674         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.funding_pubkey.serialize()).is_ok());
5675         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.revocation_basepoint.serialize()).is_ok());
5676         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.htlc_basepoint.serialize()).is_ok());
5677         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.payment_point.serialize()).is_ok());
5678         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.delayed_payment_basepoint.serialize()).is_ok());
5679 }
5680
5681 #[test]
5682 fn bolt2_open_channel_sane_dust_limit() {
5683         let chanmon_cfgs = create_chanmon_cfgs(2);
5684         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5685         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5686         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5687
5688         let channel_value_satoshis=1000000;
5689         let push_msat=10001;
5690         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).unwrap();
5691         let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
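             // 547 sats is one above the 546-sat implementation limit enforced by the receiver, so nodes[1]
             // should reject the open with the error message checked below.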
5692         node0_to_1_send_open_channel.dust_limit_satoshis = 547;
5693         node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;
5694
5695         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
5696         let events = nodes[1].node.get_and_clear_pending_msg_events();
5697         let err_msg = match events[0] {
5698                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
5699                         msg.clone()
5700                 },
5701                 _ => panic!("Unexpected event"),
5702         };
5703         assert_eq!(err_msg.data, "dust_limit_satoshis (547) is greater than the implementation limit (546)");
5704 }
5705
5706 // Test that if we fail to send an HTLC that is being freed from the holding cell, and the HTLC
5707 // originated from our node, its failure is surfaced to the user. We trigger this failure by
5708 // increasing our fee while the HTLC is in the holding cell, such that the HTLC
5709 // is no longer affordable once it's freed.
5710 #[test]
5711 fn test_fail_holding_cell_htlc_upon_free() {
5712         let chanmon_cfgs = create_chanmon_cfgs(2);
5713         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5714         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5715         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5716         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
5717
5718         // First nodes[0] generates an update_fee, setting the channel's
5719         // pending_update_fee.
5720         {
5721                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
5722                 *feerate_lock += 20;
5723         }
5724         nodes[0].node.timer_tick_occurred();
5725         check_added_monitors!(nodes[0], 1);
5726
5727         let events = nodes[0].node.get_and_clear_pending_msg_events();
5728         assert_eq!(events.len(), 1);
5729         let (update_msg, commitment_signed) = match events[0] {
5730                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
5731                         (update_fee.as_ref(), commitment_signed)
5732                 },
5733                 _ => panic!("Unexpected event"),
5734         };
5735
5736         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
5737
5738         let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5739         let channel_reserve = chan_stat.channel_reserve_msat;
5740         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
5741         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
5742
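             // nodes[0] holds 5_000_000 msat locally: the 100_000 sat funding amount minus the
             // 95_000_000 msat pushed to nodes[1] at open.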
5743         // The `2 *` factor and the `+ 1` HTLC in the commit tx fee calculation account for the fee spike reserve.
5744         let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
5745         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
5746
5747         // Send a payment which passes reserve checks but gets stuck in the holding cell.
5748         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
5749                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
5750         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5751         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
5752
5753         // Flush the pending fee update.
5754         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
5755         let (as_revoke_and_ack, _) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5756         check_added_monitors!(nodes[1], 1);
5757         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
5758         check_added_monitors!(nodes[0], 1);
5759
5760         // Upon receipt of the RAA, there will be an attempt to resend the holding cell
5761         // HTLC, but now that the fee has been raised the payment will fail, causing
5762         // us to surface its failure to the user.
5763         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5764         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
5765         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", hex::encode(chan.2)), 1);
5766
5767         // Check that the payment failed to be sent out.
5768         let events = nodes[0].node.get_and_clear_pending_events();
5769         assert_eq!(events.len(), 2);
5770         match &events[0] {
5771                 &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
5772                         assert_eq!(PaymentId(our_payment_hash.0), *payment_id.as_ref().unwrap());
5773                         assert_eq!(our_payment_hash.clone(), *payment_hash);
5774                         assert_eq!(*payment_failed_permanently, false);
5775                         assert_eq!(*short_channel_id, Some(route.paths[0].hops[0].short_channel_id));
5776                 },
5777                 _ => panic!("Unexpected event"),
5778         }
5779         match &events[1] {
5780                 &Event::PaymentFailed { ref payment_hash, .. } => {
5781                         assert_eq!(our_payment_hash.clone(), *payment_hash);
5782                 },
5783                 _ => panic!("Unexpected event"),
5784         }
5785 }
5786
5787 // Test that if multiple HTLCs are released from the holding cell and one is
5788 // valid but the other is no longer valid upon release, the valid HTLC can be
5789 // successfully completed while the other one fails as expected.
5790 #[test]
5791 fn test_free_and_fail_holding_cell_htlcs() {
5792         let chanmon_cfgs = create_chanmon_cfgs(2);
5793         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5794         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5795         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5796         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
5797
5798         // First nodes[0] generates an update_fee, setting the channel's
5799         // pending_update_fee.
5800         {
5801                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
5802                 *feerate_lock += 200;
5803         }
5804         nodes[0].node.timer_tick_occurred();
5805         check_added_monitors!(nodes[0], 1);
5806
5807         let events = nodes[0].node.get_and_clear_pending_msg_events();
5808         assert_eq!(events.len(), 1);
5809         let (update_msg, commitment_signed) = match events[0] {
5810                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
5811                         (update_fee.as_ref(), commitment_signed)
5812                 },
5813                 _ => panic!("Unexpected event"),
5814         };
5815
5816         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
5817
5818         let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5819         let channel_reserve = chan_stat.channel_reserve_msat;
5820         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
5821         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
5822
5823         // The `2 *` factor and the `+ 1` HTLC in the commit tx fee calculation account for the fee spike reserve.
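             // amt_2 is everything nodes[0] can still afford after the reserve, the two-HTLC fee-spike
             // buffer, and amt_1, so it will no longer be affordable once the pending fee update lands.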
5824         let amt_1 = 20000;
5825         let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features) - amt_1;
5826         let (route_1, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_1);
5827         let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_2);
5828
5829         // Send 2 payments which pass reserve checks but get stuck in the holding cell.
5830         nodes[0].node.send_payment_with_route(&route_1, payment_hash_1,
5831                 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
5832         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5833         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1);
5834         let payment_id_2 = PaymentId(nodes[0].keys_manager.get_secure_random_bytes());
5835         nodes[0].node.send_payment_with_route(&route_2, payment_hash_2,
5836                 RecipientOnionFields::secret_only(payment_secret_2), payment_id_2).unwrap();
5837         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5838         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2);
5839
5840         // Flush the pending fee update.
5841         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
5842         let (revoke_and_ack, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5843         check_added_monitors!(nodes[1], 1);
5844         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_and_ack);
5845         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
5846         check_added_monitors!(nodes[0], 2);
5847
5848         // Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs,
5849         // but now that the fee has been raised the second payment will fail, causing us
5850         // to surface its failure to the user. The first payment should succeed.
5851         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5852         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
5853         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", hex::encode(chan.2)), 1);
5854
5855         // Check that the second payment failed to be sent out.
5856         let events = nodes[0].node.get_and_clear_pending_events();
5857         assert_eq!(events.len(), 2);
5858         match &events[0] {
5859                 &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
5860                         assert_eq!(payment_id_2, *payment_id.as_ref().unwrap());
5861                         assert_eq!(payment_hash_2.clone(), *payment_hash);
5862                         assert_eq!(*payment_failed_permanently, false);
5863                         assert_eq!(*short_channel_id, Some(route_2.paths[0].hops[0].short_channel_id));
5864                 },
5865                 _ => panic!("Unexpected event"),
5866         }
5867         match &events[1] {
5868                 &Event::PaymentFailed { ref payment_hash, .. } => {
5869                         assert_eq!(payment_hash_2.clone(), *payment_hash);
5870                 },
5871                 _ => panic!("Unexpected event"),
5872         }
5873
5874         // Complete the first payment and the RAA from the fee update.
5875         let (payment_event, send_raa_event) = {
5876                 let mut msgs = nodes[0].node.get_and_clear_pending_msg_events();
5877                 assert_eq!(msgs.len(), 2);
5878                 (SendEvent::from_event(msgs.remove(0)), msgs.remove(0))
5879         };
5880         let raa = match send_raa_event {
5881                 MessageSendEvent::SendRevokeAndACK { msg, .. } => msg,
5882                 _ => panic!("Unexpected event"),
5883         };
5884         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
5885         check_added_monitors!(nodes[1], 1);
5886         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
5887         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
5888         let events = nodes[1].node.get_and_clear_pending_events();
5889         assert_eq!(events.len(), 1);
5890         match events[0] {
5891                 Event::PendingHTLCsForwardable { .. } => {},
5892                 _ => panic!("Unexpected event"),
5893         }
5894         nodes[1].node.process_pending_htlc_forwards();
5895         let events = nodes[1].node.get_and_clear_pending_events();
5896         assert_eq!(events.len(), 1);
5897         match events[0] {
5898                 Event::PaymentClaimable { .. } => {},
5899                 _ => panic!("Unexpected event"),
5900         }
5901         nodes[1].node.claim_funds(payment_preimage_1);
5902         check_added_monitors!(nodes[1], 1);
5903         expect_payment_claimed!(nodes[1], payment_hash_1, amt_1);
5904
5905         let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5906         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]);
5907         commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true);
5908         expect_payment_sent!(nodes[0], payment_preimage_1);
5909 }
5910
5911 // Test that if we fail to forward an HTLC that is being freed from the holding cell, the
5912 // HTLC is failed backwards. We trigger this failure by increasing our fee while the
5913 // HTLC is in the holding cell, such that the HTLC is no longer affordable
5914 // once it's freed.
5915 #[test]
5916 fn test_fail_holding_cell_htlc_upon_free_multihop() {
5917         let chanmon_cfgs = create_chanmon_cfgs(3);
5918         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5919         // Avoid having to include routing fees in calculations
5920         let mut config = test_default_channel_config();
5921         config.channel_config.forwarding_fee_base_msat = 0;
5922         config.channel_config.forwarding_fee_proportional_millionths = 0;
5923         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
5924         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5925         let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
5926         let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);
5927
5928         // First nodes[1] generates an update_fee, setting the channel's
5929         // pending_update_fee.
5930         {
5931                 let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
5932                 *feerate_lock += 20;
5933         }
5934         nodes[1].node.timer_tick_occurred();
5935         check_added_monitors!(nodes[1], 1);
5936
5937         let events = nodes[1].node.get_and_clear_pending_msg_events();
5938         assert_eq!(events.len(), 1);
5939         let (update_msg, commitment_signed) = match events[0] {
5940                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
5941                         (update_fee.as_ref(), commitment_signed)
5942                 },
5943                 _ => panic!("Unexpected event"),
5944         };
5945
5946         nodes[2].node.handle_update_fee(&nodes[1].node.get_our_node_id(), update_msg.unwrap());
5947
5948         let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan_0_1.2);
5949         let channel_reserve = chan_stat.channel_reserve_msat;
5950         let feerate = get_feerate!(nodes[0], nodes[1], chan_0_1.2);
5951         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_0_1.2);
5952
5953         // Send a payment which passes reserve checks but gets stuck in the holding cell.
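             // With forwarding fees zeroed, the full amount is forwarded over the 1 <-> 2 channel, where it
             // will sit in nodes[1]'s holding cell while the fee update on that channel is still pending.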
5954         let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
5955         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send);
5956         let payment_event = {
5957                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
5958                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
5959                 check_added_monitors!(nodes[0], 1);
5960
5961                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
5962                 assert_eq!(events.len(), 1);
5963
5964                 SendEvent::from_event(events.remove(0))
5965         };
5966         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
5967         check_added_monitors!(nodes[1], 0);
5968         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
5969         expect_pending_htlcs_forwardable!(nodes[1]);
5970
5971         chan_stat = get_channel_value_stat!(nodes[1], nodes[2], chan_1_2.2);
5972         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
5973
5974         // Flush the pending fee update.
5975         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
5976         let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
5977         check_added_monitors!(nodes[2], 1);
5978         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &raa);
5979         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &commitment_signed);
5980         check_added_monitors!(nodes[1], 2);
5981
5982         // A final RAA message is generated to finalize the fee update.
5983         let events = nodes[1].node.get_and_clear_pending_msg_events();
5984         assert_eq!(events.len(), 1);
5985
5986         let raa_msg = match &events[0] {
5987                 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => {
5988                         msg.clone()
5989                 },
5990                 _ => panic!("Unexpected event"),
5991         };
5992
5993         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_msg);
5994         check_added_monitors!(nodes[2], 1);
5995         assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
5996
5997         // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
5998         let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
5999         assert_eq!(process_htlc_forwards_event.len(), 2);
6000         match &process_htlc_forwards_event[0] {
6001                 &Event::PendingHTLCsForwardable { .. } => {},
6002                 _ => panic!("Unexpected event"),
6003         }
6004
6005         // In response, we call ChannelManager's process_pending_htlc_forwards
6006         nodes[1].node.process_pending_htlc_forwards();
6007         check_added_monitors!(nodes[1], 1);
6008
6009         // This causes the HTLC to be failed backwards.
6010         let fail_event = nodes[1].node.get_and_clear_pending_msg_events();
6011         assert_eq!(fail_event.len(), 1);
6012         let (fail_msg, commitment_signed) = match &fail_event[0] {
6013                 &MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
6014                         assert_eq!(updates.update_add_htlcs.len(), 0);
6015                         assert_eq!(updates.update_fulfill_htlcs.len(), 0);
6016                         assert_eq!(updates.update_fail_malformed_htlcs.len(), 0);
6017                         assert_eq!(updates.update_fail_htlcs.len(), 1);
6018                         (updates.update_fail_htlcs[0].clone(), updates.commitment_signed.clone())
6019                 },
6020                 _ => panic!("Unexpected event"),
6021         };
6022
6023         // Pass the failure messages back to nodes[0].
6024         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
6025         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
6026
6027         // Complete the HTLC failure+removal process.
6028         let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6029         check_added_monitors!(nodes[0], 1);
6030         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
6031         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
6032         check_added_monitors!(nodes[1], 2);
6033         let final_raa_event = nodes[1].node.get_and_clear_pending_msg_events();
6034         assert_eq!(final_raa_event.len(), 1);
6035         let raa = match &final_raa_event[0] {
6036                 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => msg.clone(),
6037                 _ => panic!("Unexpected event"),
6038         };
6039         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
6040         expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_1_2.0.contents.short_channel_id, false);
6041         check_added_monitors!(nodes[0], 1);
6042 }
6043
6044 // BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message.
6045 // BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve.
6046 // TODO: We don't believe this is explicitly enforced when sending an HTLC, but as the fee aspect of the BOLT specs is in flux we leave this as a TODO.
6047
6048 #[test]
6049 fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
6050         //BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these)
6051         let chanmon_cfgs = create_chanmon_cfgs(2);
6052         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6053         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6054         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6055         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6056
6057         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6058         route.paths[0].hops[0].fee_msat = 100;
6059
6060         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6061                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6062                 ), true, APIError::ChannelUnavailable { .. }, {});
6063         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6064 }
6065
6066 #[test]
6067 fn test_update_add_htlc_bolt2_sender_zero_value_msat() {
6068         //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6069         let chanmon_cfgs = create_chanmon_cfgs(2);
6070         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6071         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6072         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6073         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6074
6075         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6076         route.paths[0].hops[0].fee_msat = 0;
6077         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6078                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)),
6079                 true, APIError::ChannelUnavailable { ref err },
6080                 assert_eq!(err, "Cannot send 0-msat HTLC"));
6081
6082         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6083         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 1);
6084 }
6085
6086 #[test]
6087 fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
6088         //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6089         let chanmon_cfgs = create_chanmon_cfgs(2);
6090         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6091         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6092         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6093         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6094
6095         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6096         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6097                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6098         check_added_monitors!(nodes[0], 1);
6099         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6100         updates.update_add_htlcs[0].amount_msat = 0;
6101
6102         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6103         nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
6104         check_closed_broadcast!(nodes[1], true).unwrap();
6105         check_added_monitors!(nodes[1], 1);
6106         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() });
6107 }
6108
6109 #[test]
6110 fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
6111         //BOLT 2 Requirement: MUST set cltv_expiry less than 500000000.
6112         //It is enforced when constructing a route.
6113         let chanmon_cfgs = create_chanmon_cfgs(2);
6114         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6115         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6116         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6117         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6118
6119         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0)
6120                 .with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
6121         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000);
6122         route.paths[0].hops.last_mut().unwrap().cltv_expiry_delta = 500000001;
6123         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6124                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6125                 ), true, APIError::InvalidRoute { ref err },
6126                 assert_eq!(err, &"Channel CLTV overflowed?"));
6127 }
6128
6129 #[test]
6130 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() {
6131         //BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC.
6132         //BOLT 2 Requirement: for the first HTLC it offers MUST set id to 0.
6133         //BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer.
6134         let chanmon_cfgs = create_chanmon_cfgs(2);
6135         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6136         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6137         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6138         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6139         let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
6140                 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.counterparty_max_accepted_htlcs as u64;
6141
6142         // Fetch a route in advance, as we won't be able to build one once we can no longer send.
6143         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6144         for i in 0..max_accepted_htlcs {
6145                 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6146                 let payment_event = {
6147                         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6148                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6149                         check_added_monitors!(nodes[0], 1);
6150
6151                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6152                         assert_eq!(events.len(), 1);
6153                         if let MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. }, } = events[0] {
6154                                 assert_eq!(htlcs[0].htlc_id, i);
6155                         } else {
6156                                 assert!(false);
6157                         }
6158                         SendEvent::from_event(events.remove(0))
6159                 };
6160                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6161                 check_added_monitors!(nodes[1], 0);
6162                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6163
6164                 expect_pending_htlcs_forwardable!(nodes[1]);
6165                 expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000);
6166         }
6167         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6168                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6169                 ), true, APIError::ChannelUnavailable { .. }, {});
6170
6171         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6172 }
6173
6174 #[test]
6175 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
6176         //BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC.
6177         let chanmon_cfgs = create_chanmon_cfgs(2);
6178         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6179         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6180         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6181         let channel_value = 100000;
6182         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0);
6183         let max_in_flight = get_channel_value_stat!(nodes[0], nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat;
6184
6185         send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight);
6186
6187         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_in_flight);
6188         // Manually create a route over our max in flight (which our router normally automatically
6189         // limits us to).
6190         route.paths[0].hops[0].fee_msat = max_in_flight + 1;
6191         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6192                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6193                 ), true, APIError::ChannelUnavailable { .. }, {});
6194         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6195
6196         send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
6197 }
6198
6199 // BOLT 2 Requirements for the Receiver when handling an update_add_htlc message.
6200 #[test]
6201 fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
6202         //BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel.
6203         let chanmon_cfgs = create_chanmon_cfgs(2);
6204         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6205         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6206         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6207         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6208         let htlc_minimum_msat: u64;
6209         {
6210                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
6211                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
6212                 let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
6213                 htlc_minimum_msat = channel.context.get_holder_htlc_minimum_msat();
6214         }
6215
6216         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
6217         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6218                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6219         check_added_monitors!(nodes[0], 1);
6220         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6221         updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1;
6222         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6223         assert!(nodes[1].node.list_channels().is_empty());
6224         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6225         assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6226         check_added_monitors!(nodes[1], 1);
6227         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
6228 }
6229
6230 #[test]
6231 fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
6232         //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel
6233         let chanmon_cfgs = create_chanmon_cfgs(2);
6234         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6235         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6236         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6237         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6238
6239         let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6240         let channel_reserve = chan_stat.channel_reserve_msat;
6241         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
6242         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
6243         // The 2* and +1 are for the fee spike reserve.
6244         let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
6245
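        // The channel was funded with 100_000 sat and 95_000_000 msat was pushed to nodes[1],
        // leaving nodes[0] with a local balance of 5_000_000 msat.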
6246         let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound;
6247         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
6248         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6249                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6250         check_added_monitors!(nodes[0], 1);
6251         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6252
6253         // Even though channel-initiator senders are required to respect the fee_spike_reserve,
6254         // at this time channel-initiatee receivers are not required to enforce that senders
6255         // respect the fee_spike_reserve.
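        // Bump the HTLC so the sender's remaining balance would fall below its channel reserve;
        // the receiver should reject the add and close the channel.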
6256         updates.update_add_htlcs[0].amount_msat = max_can_send + commit_tx_fee_outbound + 1;
6257         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6258
6259         assert!(nodes[1].node.list_channels().is_empty());
6260         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6261         assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
6262         check_added_monitors!(nodes[1], 1);
6263         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
6264 }
6265
6266 #[test]
6267 fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
6268         //BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel
6269         //BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash.
6270         let chanmon_cfgs = create_chanmon_cfgs(2);
6271         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6272         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6273         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6274         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6275
6276         let send_amt = 3999999;
6277         let (mut route, our_payment_hash, _, our_payment_secret) =
6278                 get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
6279         route.paths[0].hops[0].fee_msat = send_amt;
6280         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
6281         let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
6282         let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap();
6283         let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
6284                 &route.paths[0], send_amt, RecipientOnionFields::secret_only(our_payment_secret), cur_height, &None).unwrap();
6285         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
6286
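        // Construct the update_add_htlc message by hand so the same onion can be replayed with
        // increasing htlc_ids, sidestepping the sender-side limits on in-flight HTLCs.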
6287         let mut msg = msgs::UpdateAddHTLC {
6288                 channel_id: chan.2,
6289                 htlc_id: 0,
6290                 amount_msat: 1000,
6291                 payment_hash: our_payment_hash,
6292                 cltv_expiry: htlc_cltv,
6293                 onion_routing_packet: onion_packet.clone(),
6294                 skimmed_fee_msat: None,
6295         };
6296
6297         for i in 0..50 {
6298                 msg.htlc_id = i as u64;
6299                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6300         }
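        // The 51st HTLC exceeds the receiver's max_accepted_htlcs (50 by default here), so
        // handling it should fail the channel.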
6301         msg.htlc_id = 50;
6302         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6303
6304         assert!(nodes[1].node.list_channels().is_empty());
6305         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6306         assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6307         check_added_monitors!(nodes[1], 1);
6308         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
6309 }
6310
6311 #[test]
6312 fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
6313         //BOLT2 Requirement: if a sending node adds more than the receiver's max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction: SHOULD fail the channel
6314         let chanmon_cfgs = create_chanmon_cfgs(2);
6315         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6316         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6317         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6318         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6319
6320         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6321         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6322                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6323         check_added_monitors!(nodes[0], 1);
6324         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6325         updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat + 1;
6326         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6327
6328         assert!(nodes[1].node.list_channels().is_empty());
6329         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6330         assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
6331         check_added_monitors!(nodes[1], 1);
6332         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
6333 }
6334
6335 #[test]
6336 fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
6337         //BOLT2 Requirement: if the sending node sets cltv_expiry to a value greater than or equal to 500000000: SHOULD fail the channel.
6338         let chanmon_cfgs = create_chanmon_cfgs(2);
6339         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6340         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6341         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6342
6343         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6344         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6345         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6346                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6347         check_added_monitors!(nodes[0], 1);
6348         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
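        // cltv_expiry values of 500000000 or more would be interpreted as Unix timestamps rather
        // than block heights (per nLockTime semantics), so the receiver must reject this.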
6349         updates.update_add_htlcs[0].cltv_expiry = 500000000;
6350         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6351
6352         assert!(nodes[1].node.list_channels().is_empty());
6353         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6354         assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height");
6355         check_added_monitors!(nodes[1], 1);
6356         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
6357 }
6358
6359 #[test]
6360 fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
6361         //BOLT 2 requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection.
6362         // We test this by first checking that repeated HTLCs pass commitment signature checks
6363         // after a disconnect, and then that non-sequential htlc_ids result in a channel failure.
6364         let chanmon_cfgs = create_chanmon_cfgs(2);
6365         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6366         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6367         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6368
6369         create_announced_chan_between_nodes(&nodes, 0, 1);
6370         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6371         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6372                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6373         check_added_monitors!(nodes[0], 1);
6374         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6375         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6376
6377         //Disconnect and Reconnect
6378         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
6379         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
6380         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
6381                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
6382         }, true).unwrap();
6383         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
6384         assert_eq!(reestablish_1.len(), 1);
6385         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
6386                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
6387         }, false).unwrap();
6388         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
6389         assert_eq!(reestablish_2.len(), 1);
6390         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
6391         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
6392         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
6393         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
6394
6395         //Resend HTLC
6396         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6397         assert_eq!(updates.commitment_signed.htlc_signatures.len(), 1);
6398         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed);
6399         check_added_monitors!(nodes[1], 1);
6400         let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6401
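        // Now that the HTLC has been committed, replaying htlc_id 0 is no longer a benign
        // retransmission: nodes[1] expects the next id, so it should close the channel.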
6402         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6403
6404         assert!(nodes[1].node.list_channels().is_empty());
6405         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6406         assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
6407         check_added_monitors!(nodes[1], 1);
6408         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
6409 }
6410
6411 #[test]
6412 fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
6413         //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions:     MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6414
6415         let chanmon_cfgs = create_chanmon_cfgs(2);
6416         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6417         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6418         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6419         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6420         let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6421         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6422                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6423
6424         check_added_monitors!(nodes[0], 1);
6425         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6426         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6427
6428         let update_msg = msgs::UpdateFulfillHTLC{
6429                 channel_id: chan.2,
6430                 htlc_id: 0,
6431                 payment_preimage: our_payment_preimage,
6432         };
6433
6434         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6435
6436         assert!(nodes[0].node.list_channels().is_empty());
6437         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6438         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6439         check_added_monitors!(nodes[0], 1);
6440         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
6441 }
6442
6443 #[test]
6444 fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
6445         //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions:     MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6446
6447         let chanmon_cfgs = create_chanmon_cfgs(2);
6448         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6449         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6450         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6451         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6452
6453         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6454         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6455                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6456         check_added_monitors!(nodes[0], 1);
6457         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6458         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6459
6460         let update_msg = msgs::UpdateFailHTLC{
6461                 channel_id: chan.2,
6462                 htlc_id: 0,
6463                 reason: msgs::OnionErrorPacket { data: Vec::new()},
6464         };
6465
6466         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6467
6468         assert!(nodes[0].node.list_channels().is_empty());
6469         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6470         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6471         check_added_monitors!(nodes[0], 1);
6472         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
6473 }
6474
6475 #[test]
6476 fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() {
6477         //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions:     MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6478
6479         let chanmon_cfgs = create_chanmon_cfgs(2);
6480         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6481         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6482         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6483         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6484
6485         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6486         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6487                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6488         check_added_monitors!(nodes[0], 1);
6489         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6490         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6491         let update_msg = msgs::UpdateFailMalformedHTLC{
6492                 channel_id: chan.2,
6493                 htlc_id: 0,
6494                 sha256_of_onion: [1; 32],
6495                 failure_code: 0x8000,
6496         };
6497
6498         nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6499
6500         assert!(nodes[0].node.list_channels().is_empty());
6501         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6502         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6503         check_added_monitors!(nodes[0], 1);
6504         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
6505 }
6506
6507 #[test]
6508 fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
6509         //BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel.
6510
6511         let chanmon_cfgs = create_chanmon_cfgs(2);
6512         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6513         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6514         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6515         create_announced_chan_between_nodes(&nodes, 0, 1);
6516
6517         let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
6518
6519         nodes[1].node.claim_funds(our_payment_preimage);
6520         check_added_monitors!(nodes[1], 1);
6521         expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);
6522
6523         let events = nodes[1].node.get_and_clear_pending_msg_events();
6524         assert_eq!(events.len(), 1);
6525         let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
6526                 match events[0] {
6527                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6528                                 assert!(update_add_htlcs.is_empty());
6529                                 assert_eq!(update_fulfill_htlcs.len(), 1);
6530                                 assert!(update_fail_htlcs.is_empty());
6531                                 assert!(update_fail_malformed_htlcs.is_empty());
6532                                 assert!(update_fee.is_none());
6533                                 update_fulfill_htlcs[0].clone()
6534                         },
6535                         _ => panic!("Unexpected event"),
6536                 }
6537         };
6538
6539         update_fulfill_msg.htlc_id = 1;
6540
6541         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
6542
6543         assert!(nodes[0].node.list_channels().is_empty());
6544         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6545         assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
6546         check_added_monitors!(nodes[0], 1);
6547         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
6548 }
6549
6550 #[test]
6551 fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
6552         //BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel.
6553
6554         let chanmon_cfgs = create_chanmon_cfgs(2);
6555         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6556         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6557         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6558         create_announced_chan_between_nodes(&nodes, 0, 1);
6559
6560         let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
6561
6562         nodes[1].node.claim_funds(our_payment_preimage);
6563         check_added_monitors!(nodes[1], 1);
6564         expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);
6565
6566         let events = nodes[1].node.get_and_clear_pending_msg_events();
6567         assert_eq!(events.len(), 1);
6568         let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
6569                 match events[0] {
6570                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6571                                 assert!(update_add_htlcs.is_empty());
6572                                 assert_eq!(update_fulfill_htlcs.len(), 1);
6573                                 assert!(update_fail_htlcs.is_empty());
6574                                 assert!(update_fail_malformed_htlcs.is_empty());
6575                                 assert!(update_fee.is_none());
6576                                 update_fulfill_htlcs[0].clone()
6577                         },
6578                         _ => panic!("Unexpected event"),
6579                 }
6580         };
6581
6582         update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]);
6583
6584         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
6585
6586         assert!(nodes[0].node.list_channels().is_empty());
6587         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6588         assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
6589         check_added_monitors!(nodes[0], 1);
6590         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
6591 }
6592
6593 #[test]
6594 fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() {
6595         //BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel.
6596
6597         let chanmon_cfgs = create_chanmon_cfgs(2);
6598         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6599         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6600         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6601         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6602
6603         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6604         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6605                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6606         check_added_monitors!(nodes[0], 1);
6607
6608         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6609         updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
6610
6611         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6612         check_added_monitors!(nodes[1], 0);
6613         commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true);
6614
6615         let events = nodes[1].node.get_and_clear_pending_msg_events();
6616
6617         let mut update_msg: msgs::UpdateFailMalformedHTLC = {
6618                 match events[0] {
6619                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6620                                 assert!(update_add_htlcs.is_empty());
6621                                 assert!(update_fulfill_htlcs.is_empty());
6622                                 assert!(update_fail_htlcs.is_empty());
6623                                 assert_eq!(update_fail_malformed_htlcs.len(), 1);
6624                                 assert!(update_fee.is_none());
6625                                 update_fail_malformed_htlcs[0].clone()
6626                         },
6627                         _ => panic!("Unexpected event"),
6628                 }
6629         };
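        // 0x8000 is the BADONION bit; clearing it makes the update_fail_malformed_htlc invalid,
        // so nodes[0] should fail the channel when handling it.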
6630         update_msg.failure_code &= !0x8000;
6631         nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6632
6633         assert!(nodes[0].node.list_channels().is_empty());
6634         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6635         assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
6636         check_added_monitors!(nodes[0], 1);
6637         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
6638 }
6639
6640 #[test]
6641 fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_update_fail_htlc() {
6642         //BOLT 2 Requirement: a receiving node which has an outgoing HTLC canceled by update_fail_malformed_htlc:
6643         //    * MUST return an error in the update_fail_htlc sent to the link which originally sent the HTLC, using the failure_code given and setting the data to sha256_of_onion.
6644
6645         let chanmon_cfgs = create_chanmon_cfgs(3);
6646         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6647         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
6648         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6649         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6650         let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000);
6651
6652         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
6653
6654         //First hop
6655         let mut payment_event = {
6656                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6657                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6658                 check_added_monitors!(nodes[0], 1);
6659                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6660                 assert_eq!(events.len(), 1);
6661                 SendEvent::from_event(events.remove(0))
6662         };
6663         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6664         check_added_monitors!(nodes[1], 0);
6665         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6666         expect_pending_htlcs_forwardable!(nodes[1]);
6667         let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
6668         assert_eq!(events_2.len(), 1);
6669         check_added_monitors!(nodes[1], 1);
6670         payment_event = SendEvent::from_event(events_2.remove(0));
6671         assert_eq!(payment_event.msgs.len(), 1);
6672
6673         //Second Hop
6674         payment_event.msgs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
6675         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
6676         check_added_monitors!(nodes[2], 0);
6677         commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
6678
6679         let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
6680         assert_eq!(events_3.len(), 1);
6681         let update_msg : (msgs::UpdateFailMalformedHTLC, msgs::CommitmentSigned) = {
6682                 match events_3[0] {
6683                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
6684                                 assert!(update_add_htlcs.is_empty());
6685                                 assert!(update_fulfill_htlcs.is_empty());
6686                                 assert!(update_fail_htlcs.is_empty());
6687                                 assert_eq!(update_fail_malformed_htlcs.len(), 1);
6688                                 assert!(update_fee.is_none());
6689                                 (update_fail_malformed_htlcs[0].clone(), commitment_signed.clone())
6690                         },
6691                         _ => panic!("Unexpected event"),
6692                 }
6693         };
6694
6695         nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg.0);
6696
6697         check_added_monitors!(nodes[1], 0);
6698         commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
6699         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
6700         let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
6701         assert_eq!(events_4.len(), 1);
6702
6703         //Confirm that handling the update_fail_malformed_htlc message produces an update_fail_htlc message to be forwarded back along the route
6704         match events_4[0] {
6705                 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6706                         assert!(update_add_htlcs.is_empty());
6707                         assert!(update_fulfill_htlcs.is_empty());
6708                         assert_eq!(update_fail_htlcs.len(), 1);
6709                         assert!(update_fail_malformed_htlcs.is_empty());
6710                         assert!(update_fee.is_none());
6711                 },
6712                 _ => panic!("Unexpected event"),
6713         };
6714
6715         check_added_monitors!(nodes[1], 1);
6716 }
6717
6718 #[test]
6719 fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() {
6720         let chanmon_cfgs = create_chanmon_cfgs(3);
6721         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6722         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
6723         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6724         create_announced_chan_between_nodes(&nodes, 0, 1);
6725         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
6726
6727         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100_000);
6728
6729         // First hop
6730         let mut payment_event = {
6731                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6732                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6733                 check_added_monitors!(nodes[0], 1);
6734                 SendEvent::from_node(&nodes[0])
6735         };
6736
6737         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6738         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6739         expect_pending_htlcs_forwardable!(nodes[1]);
6740         check_added_monitors!(nodes[1], 1);
6741         payment_event = SendEvent::from_node(&nodes[1]);
6742         assert_eq!(payment_event.msgs.len(), 1);
6743
6744         // Second Hop
6745         payment_event.msgs[0].onion_routing_packet.version = 1; // Trigger an invalid_onion_version error
6746         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
6747         check_added_monitors!(nodes[2], 0);
6748         commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
6749
6750         let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
6751         assert_eq!(events_3.len(), 1);
6752         match events_3[0] {
6753                 MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
6754                         let mut update_msg = updates.update_fail_malformed_htlcs[0].clone();
6755                         // Set the NODE bit (BADONION and PERM already set in invalid_onion_version error)
6756                         update_msg.failure_code |= 0x2000;
6757
6758                         nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg);
6759                         commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true);
6760                 },
6761                 _ => panic!("Unexpected event"),
6762         }
6763
6764         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
6765                 vec![HTLCDestination::NextHopChannel {
6766                         node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
6767         let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
6768         assert_eq!(events_4.len(), 1);
6769         check_added_monitors!(nodes[1], 1);
6770
6771         match events_4[0] {
6772                 MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
6773                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
6774                         commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
6775                 },
6776                 _ => panic!("Unexpected event"),
6777         }
6778
6779         let events_5 = nodes[0].node.get_and_clear_pending_events();
6780         assert_eq!(events_5.len(), 2);
6781
6782         // Expect a PaymentPathFailed event with a ChannelFailure network update for the channel
6783         // between the node originating the error and its previous hop.
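        // The failure code combines BADONION (0x8000), PERM (0x4000), the NODE bit set above
        // (0x2000), and invalid_onion_version (4).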
6784         match events_5[0] {
6785                 Event::PaymentPathFailed { error_code, failure: PathFailure::OnPath { network_update: Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent }) }, ..
6786                 } => {
6787                         assert_eq!(short_channel_id, chan_2.0.contents.short_channel_id);
6788                         assert!(is_permanent);
6789                         assert_eq!(error_code, Some(0x8000|0x4000|0x2000|4));
6790                 },
6791                 _ => panic!("Unexpected event"),
6792         }
6793         match events_5[1] {
6794                 Event::PaymentFailed { payment_hash, .. } => {
6795                         assert_eq!(payment_hash, our_payment_hash);
6796                 },
6797                 _ => panic!("Unexpected event"),
6798         }
6799
6800         // TODO: Test actual removal of channel from NetworkGraph when it's implemented.
6801 }
6802
6803 fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
6804         // Dust-HTLC failure updates must be delayed until the failure-trigger tx (in this case the local commitment tx) reaches ANTI_REORG_DELAY confirmations.
6805         // We can have at most two valid local commitment txs, so both cases must be covered, and both txs must be checked to get them all, as an
6806         // HTLC could have been removed from the latest local commitment tx but still be valid until we get the remote RAA.
6807
6808         let mut chanmon_cfgs = create_chanmon_cfgs(2);
6809         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
6810         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6811         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6812         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6813         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6814
6815         let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
6816                 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
6817
6818         // We route 2 dust-HTLCs between A and B
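        // (HTLCs this small are trimmed to fees and get no output on the commitment transaction,
        // so they can only be failed back once the commitment itself confirms.)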
6819         let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
6820         let (_, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
6821         route_payment(&nodes[0], &[&nodes[1]], 1000000);
6822
6823         // Cache one local commitment tx as previous
6824         let as_prev_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
6825
6826         // Fail one HTLC to prune it from the will-be-latest local commitment tx
6827         nodes[1].node.fail_htlc_backwards(&payment_hash_2);
6828         check_added_monitors!(nodes[1], 0);
6829         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
6830         check_added_monitors!(nodes[1], 1);
6831
6832         let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6833         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &remove.update_fail_htlcs[0]);
6834         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &remove.commitment_signed);
6835         check_added_monitors!(nodes[0], 1);
6836
6837         // Cache one local commitment tx as latest
6838         let as_last_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
6839
6840         let events = nodes[0].node.get_and_clear_pending_msg_events();
6841         match events[0] {
6842                 MessageSendEvent::SendRevokeAndACK { node_id, .. } => {
6843                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
6844                 },
6845                 _ => panic!("Unexpected event"),
6846         }
6847         match events[1] {
6848                 MessageSendEvent::UpdateHTLCs { node_id, .. } => {
6849                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
6850                 },
6851                 _ => panic!("Unexpected event"),
6852         }
6853
6854         assert_ne!(as_prev_commitment_tx, as_last_commitment_tx);
6855         // Fail the 2 dust-HTLCs, moving their failure into the maturation buffer (htlc_updated_waiting_threshold_conf)
6856         if announce_latest {
6857                 mine_transaction(&nodes[0], &as_last_commitment_tx[0]);
6858         } else {
6859                 mine_transaction(&nodes[0], &as_prev_commitment_tx[0]);
6860         }
6861
6862         check_closed_broadcast!(nodes[0], true);
6863         check_added_monitors!(nodes[0], 1);
6864         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
6865
6866         assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
6867         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
6868         let events = nodes[0].node.get_and_clear_pending_events();
6869         // Only 2 PaymentPathFailed events should show up; the over-dust HTLC has to be failed by the timeout tx
6870         assert_eq!(events.len(), 4);
6871         let mut first_failed = false;
6872         for event in events {
6873                 match event {
6874                         Event::PaymentPathFailed { payment_hash, .. } => {
6875                                 if payment_hash == payment_hash_1 {
6876                                         assert!(!first_failed);
6877                                         first_failed = true;
6878                                 } else {
6879                                         assert_eq!(payment_hash, payment_hash_2);
6880                                 }
6881                         },
6882                         Event::PaymentFailed { .. } => {}
6883                         _ => panic!("Unexpected event"),
6884                 }
6885         }
6886 }
6887
6888 #[test]
6889 fn test_failure_delay_dust_htlc_local_commitment() {
6890         do_test_failure_delay_dust_htlc_local_commitment(true);
6891         do_test_failure_delay_dust_htlc_local_commitment(false);
6892 }
6893
6894 fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
6895         // Outbound HTLC-failure updates must be cancelled if we get a reorg before we reach ANTI_REORG_DELAY.
6896         // Broadcast of revoked remote commitment tx, trigger failure-update of dust/non-dust HTLCs
6897         // Broadcast of remote commitment tx, trigger failure-update of dust-HTLCs
6898         // Broadcast of timeout tx on remote commitment tx, trigger failure-update of non-dust HTLCs
6899         // Broadcast of local commitment tx, trigger failure-update of dust-HTLCs
6900         // Broadcast of HTLC-timeout tx on local commitment tx, trigger failure-update of non-dust HTLCs
6901
6902         let chanmon_cfgs = create_chanmon_cfgs(3);
6903         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6904         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
6905         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6906         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6907
6908         let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
6909                 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
6910
6911         let (_payment_preimage_1, dust_hash, _payment_secret_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
6912         let (_payment_preimage_2, non_dust_hash, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
6913
6914         let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
6915         let bs_commitment_tx = get_local_commitment_txn!(nodes[1], chan.2);
6916
6917         // We revoked bs_commitment_tx
6918         if revoked {
6919                 let (payment_preimage_3, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
6920                 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
6921         }
6922
6923         let mut timeout_tx = Vec::new();
6924         if local {
6925                 // We fail dust-HTLC 1 by broadcast of local commitment tx
6926                 mine_transaction(&nodes[0], &as_commitment_tx[0]);
6927                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
6928                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
6929                 expect_payment_failed!(nodes[0], dust_hash, false);
6930
6931                 connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - ANTI_REORG_DELAY);
6932                 check_closed_broadcast!(nodes[0], true);
6933                 check_added_monitors!(nodes[0], 1);
6934                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
6935                 timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
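                // The last witness element is the offered-HTLC witness script, so its length
                // confirms this is the HTLC-timeout spend of our offered HTLC.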
6936                 assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
6937                 // We fail non-dust-HTLC 2 by broadcast of local HTLC-timeout tx on local commitment tx
6938                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
6939                 mine_transaction(&nodes[0], &timeout_tx[0]);
6940                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
6941                 expect_payment_failed!(nodes[0], non_dust_hash, false);
6942         } else {
6943                 // We fail dust-HTLC 1 by broadcast of the remote commitment tx. If revoked, also fail the non-dust HTLC
6944                 mine_transaction(&nodes[0], &bs_commitment_tx[0]);
6945                 check_closed_broadcast!(nodes[0], true);
6946                 check_added_monitors!(nodes[0], 1);
6947                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
6948                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
6949
6950                 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
6951                 timeout_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().drain(..)
6952                         .filter(|tx| tx.input[0].previous_output.txid == bs_commitment_tx[0].txid()).collect();
6953                 check_spends!(timeout_tx[0], bs_commitment_tx[0]);
6954                 // For either a revoked or a non-revoked commitment transaction, after ANTI_REORG_DELAY the
6955                 // dust HTLC should have been failed.
6956                 expect_payment_failed!(nodes[0], dust_hash, false);
6957
6958                 if !revoked {
6959                         assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
6960                 } else {
6961                         assert_eq!(timeout_tx[0].lock_time.0, 11);
6962                 }
6963                 // We fail non-dust-HTLC 2 by broadcast of local timeout/revocation-claim tx
6964                 mine_transaction(&nodes[0], &timeout_tx[0]);
6965                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
6966                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
6967                 expect_payment_failed!(nodes[0], non_dust_hash, false);
6968         }
6969 }
6970
6971 #[test]
6972 fn test_sweep_outbound_htlc_failure_update() {
6973         do_test_sweep_outbound_htlc_failure_update(false, true);
6974         do_test_sweep_outbound_htlc_failure_update(false, false);
6975         do_test_sweep_outbound_htlc_failure_update(true, false);
6976 }
6977
6978 #[test]
6979 fn test_user_configurable_csv_delay() {
6980         // We test that our channel constructors yield errors when we pass them an absurd CSV delay
6981
6982         let mut low_our_to_self_config = UserConfig::default();
6983         low_our_to_self_config.channel_handshake_config.our_to_self_delay = 6;
6984         let mut high_their_to_self_config = UserConfig::default();
6985         high_their_to_self_config.channel_handshake_limits.their_to_self_delay = 100;
6986         let user_cfgs = [Some(high_their_to_self_config.clone()), None];
6987         let chanmon_cfgs = create_chanmon_cfgs(2);
6988         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6989         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
6990         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6991
6992         // We test that config.our_to_self_delay >= BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new()
6993         if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
6994                 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0,
6995                 &low_our_to_self_config, 0, 42)
6996         {
6997                 match error {
6998                         APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
6999                         _ => panic!("Unexpected event"),
7000                 }
7001         } else { assert!(false) }
7002
7003         // We test that config.our_to_self_delay >= BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
7004         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
7005         let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
7006         open_channel.to_self_delay = 200;
7007         if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7008                 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
7009                 &low_our_to_self_config, 0, &nodes[0].logger, 42)
7010         {
7011                 match error {
7012                         ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str()));  },
7013                         _ => panic!("Unexpected event"),
7014                 }
7015         } else { assert!(false); }
7016
7017         // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::accept_channel()
7018         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
7019         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
7020         let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
7021         accept_channel.to_self_delay = 200;
7022         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
7023         let reason_msg;
7024         if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
7025                 match action {
7026                         &ErrorAction::SendErrorMessage { ref msg } => {
7027                                 assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str()));
7028                                 reason_msg = msg.data.clone();
7029                         },
7030                         _ => { panic!(); }
7031                 }
7032         } else { panic!(); }
7033         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg });
7034
7035         // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
7036         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
7037         let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
7038         open_channel.to_self_delay = 200;
7039         if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7040                 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
7041                 &high_their_to_self_config, 0, &nodes[0].logger, 42)
7042         {
7043                 match error {
7044                         ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); },
7045                         _ => panic!("Unexpected event"),
7046                 }
7047         } else { assert!(false); }
7048 }
7049
7050 #[test]
7051 fn test_check_htlc_underpaying() {
7052         // Send a payment through A -> B, but A is maliciously
7053         // sending a probe payment (i.e. less than the expected value)
7054         // to B; B should refuse the payment.
7055
7056         let chanmon_cfgs = create_chanmon_cfgs(2);
7057         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7058         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7059         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7060
7061         // Create some initial channels
7062         create_announced_chan_between_nodes(&nodes, 0, 1);
7063
7064         let scorer = test_utils::TestScorer::new();
7065         let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7066         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV).with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
7067         let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(), None, 10_000, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
7068         let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]);
7069         let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None).unwrap();
7070         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
7071                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
7072         check_added_monitors!(nodes[0], 1);
7073
7074         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
7075         assert_eq!(events.len(), 1);
7076         let mut payment_event = SendEvent::from_event(events.pop().unwrap());
7077         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
7078         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
7079
7080         // Note that we first have to wait a random delay before processing the receipt of the HTLC,
7081         // and then will wait a second random delay before failing the HTLC back:
7082         expect_pending_htlcs_forwardable!(nodes[1]);
7083         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
7084
7085         // nodes[1] is expecting a payment of 100_000 msat but received 10_000,
7086         // so it should fail the HTLC as if we didn't know the preimage.
7087         nodes[1].node.process_pending_htlc_forwards();
7088
7089         let events = nodes[1].node.get_and_clear_pending_msg_events();
7090         assert_eq!(events.len(), 1);
7091         let (update_fail_htlc, commitment_signed) = match events[0] {
7092                 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
7093                         assert!(update_add_htlcs.is_empty());
7094                         assert!(update_fulfill_htlcs.is_empty());
7095                         assert_eq!(update_fail_htlcs.len(), 1);
7096                         assert!(update_fail_malformed_htlcs.is_empty());
7097                         assert!(update_fee.is_none());
7098                         (update_fail_htlcs[0].clone(), commitment_signed)
7099                 },
7100                 _ => panic!("Unexpected event"),
7101         };
7102         check_added_monitors!(nodes[1], 1);
7103
7104         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlc);
7105         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
7106
7107         // 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32
7108         let mut expected_failure_data = (10_000 as u64).to_be_bytes().to_vec();
7109         expected_failure_data.extend_from_slice(&CHAN_CONFIRM_DEPTH.to_be_bytes());
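        // 0x4000|15 is PERM|incorrect_or_unknown_payment_details; per BOLT 4 its failure data is the
        // HTLC amount (u64, big-endian) followed by the current block height (u32, big-endian).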
7110         expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000|15, &expected_failure_data[..]);
7111 }
7112
7113 #[test]
7114 fn test_announce_disable_channels() {
7115         // Create 3 channels between A and B. Disconnect B, call timer_tick_occurred and check that
7116         // ChannelUpdates marking the channels disabled are generated. Reconnect B, reestablish, and
         // check that re-enabling ChannelUpdates are generated after enough ticks.
7117
7118         let chanmon_cfgs = create_chanmon_cfgs(2);
7119         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7120         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7121         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7122
7123         create_announced_chan_between_nodes(&nodes, 0, 1);
7124         create_announced_chan_between_nodes(&nodes, 1, 0);
7125         create_announced_chan_between_nodes(&nodes, 0, 1);
7126
7127         // Disconnect peers
7128         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
7129         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
7130
7131         for _ in 0..DISABLE_GOSSIP_TICKS + 1 {
7132                 nodes[0].node.timer_tick_occurred();
7133         }
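        // The disabling ChannelUpdates are only generated once the peer has been disconnected for
        // DISABLE_GOSSIP_TICKS full ticks, hence the extra tick above.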
7134         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
7135         assert_eq!(msg_events.len(), 3);
7136         let mut chans_disabled = HashMap::new();
7137         for e in msg_events {
7138                 match e {
7139                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
7140                                 assert_eq!(msg.contents.flags & (1<<1), 1<<1); // The "channel disabled" bit should be set
7141                                 // Check that each channel gets updated exactly once
7142                                 if chans_disabled.insert(msg.contents.short_channel_id, msg.contents.timestamp).is_some() {
7143                                         panic!("Generated ChannelUpdate for wrong chan!");
7144                                 }
7145                         },
7146                         _ => panic!("Unexpected event"),
7147                 }
7148         }
7149         // Reconnect peers
7150         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
7151                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
7152         }, true).unwrap();
7153         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
7154         assert_eq!(reestablish_1.len(), 3);
7155         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
7156                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
7157         }, false).unwrap();
7158         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
7159         assert_eq!(reestablish_2.len(), 3);
7160
7161         // Reestablish chan_1
7162         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
7163         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7164         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
7165         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7166         // Reestablish chan_2
7167         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[1]);
7168         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7169         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[1]);
7170         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7171         // Reestablish chan_3
7172         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[2]);
7173         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7174         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[2]);
7175         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7176
7177         for _ in 0..ENABLE_GOSSIP_TICKS {
7178                 nodes[0].node.timer_tick_occurred();
7179         }
7180         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
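        // Symmetrically, the re-enabling updates are only broadcast one tick after ENABLE_GOSSIP_TICKS
        // ticks with the peer reconnected, hence the extra tick below.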
7181         nodes[0].node.timer_tick_occurred();
7182         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
7183         assert_eq!(msg_events.len(), 3);
7184         for e in msg_events {
7185                 match e {
7186                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
7187                                 assert_eq!(msg.contents.flags & (1<<1), 0); // The "channel disabled" bit should be off
7188                                 match chans_disabled.remove(&msg.contents.short_channel_id) {
7189                                         // Each update should have a higher timestamp than the previous one, replacing
7190                                         // the old one.
7191                                         Some(prev_timestamp) => assert!(msg.contents.timestamp > prev_timestamp),
7192                                         None => panic!("Generated ChannelUpdate for wrong chan!"),
7193                                 }
7194                         },
7195                         _ => panic!("Unexpected event"),
7196                 }
7197         }
7198         // Check that each channel gets updated exactly once
7199         assert!(chans_disabled.is_empty());
7200 }
7201
7202 #[test]
7203 fn test_bump_penalty_txn_on_revoked_commitment() {
7204         // If penalty txn have feerates too low to get into mempools, RBF-bump them to make sure we're
7205         // able to claim outputs on the revoked commitment transaction before the timelocks expire.
7206
7207         let chanmon_cfgs = create_chanmon_cfgs(2);
7208         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7209         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7210         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7211
7212         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7213
7214         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
7215         let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 30)
7216                 .with_bolt11_features(nodes[0].node.invoice_features()).unwrap();
7217         let (route,_, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_params, 3000000);
7218         send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000);
7219
7220         let revoked_txn = get_local_commitment_txn!(nodes[0], chan.2);
7221         // Revoked commitment txn with 4 outputs: to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
7222         assert_eq!(revoked_txn[0].output.len(), 4);
7223         assert_eq!(revoked_txn[0].input.len(), 1);
7224         assert_eq!(revoked_txn[0].input[0].previous_output.txid, chan.3.txid());
7225         let revoked_txid = revoked_txn[0].txid();
7226
7227         let mut penalty_sum = 0;
7228         for outp in revoked_txn[0].output.iter() {
7229                 if outp.script_pubkey.is_v0_p2wsh() {
7230                         penalty_sum += outp.value;
7231                 }
7232         }
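        // Only the P2WSH outputs (to_local plus both HTLC outputs) are swept by the justice tx; the
        // to_remote output already pays nodes[1] directly and is excluded from the expected penalty value.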
7233
7234         // Connect blocks to change the height_timer range to see if we use the right soonest_timelock
7235         let header_114 = connect_blocks(&nodes[1], 14);
7236
7237         // Actually revoke tx by claiming a HTLC
7238         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
7239         connect_block(&nodes[1], &create_dummy_block(header_114, 42, vec![revoked_txn[0].clone()]));
7240         check_added_monitors!(nodes[1], 1);
7241
7242         // One or more justice tx should have been broadcast, check it
7243         let penalty_1;
7244         let feerate_1;
7245         {
7246                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7247                 assert_eq!(node_txn.len(), 1); // justice tx (broadcasted from ChannelMonitor)
7248                 assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7249                 assert_eq!(node_txn[0].output.len(), 1);
7250                 check_spends!(node_txn[0], revoked_txn[0]);
7251                 let fee_1 = penalty_sum - node_txn[0].output[0].value;
7252                 feerate_1 = fee_1 * 1000 / node_txn[0].weight() as u64;
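                // fee * 1000 / weight gives the feerate in sats per 1000 weight units, which is how the
                // bump checks below compare feerates.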
7253                 penalty_1 = node_txn[0].txid();
7254                 node_txn.clear();
7255         };
7256
7257         // After exhaustion of height timer, a new bumped justice tx should have been broadcast, check it
7258         connect_blocks(&nodes[1], 15);
7259         let mut penalty_2 = penalty_1;
7260         let mut feerate_2 = 0;
7261         {
7262                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7263                 assert_eq!(node_txn.len(), 1);
7264                 if node_txn[0].input[0].previous_output.txid == revoked_txid {
7265                         assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7266                         assert_eq!(node_txn[0].output.len(), 1);
7267                         check_spends!(node_txn[0], revoked_txn[0]);
7268                         penalty_2 = node_txn[0].txid();
7269                         // Verify the new bumped tx is different from the last claiming transaction; we don't want spurious rebroadcasts
7270                         assert_ne!(penalty_2, penalty_1);
7271                         let fee_2 = penalty_sum - node_txn[0].output[0].value;
7272                         feerate_2 = fee_2 * 1000 / node_txn[0].weight() as u64;
7273                         // Verify 25% bump heuristic
7274                         assert!(feerate_2 * 100 >= feerate_1 * 125);
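                        // The claim-bumping logic is expected to raise the feerate by at least 25% on each
                        // retry so the replacement remains attractive for relay.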
7275                         node_txn.clear();
7276                 }
7277         }
7278         assert_ne!(feerate_2, 0);
7279
7280         // After exhaustion of height timer for a 2nd time, a new bumped justice tx should have been broadcast, check it
7281         connect_blocks(&nodes[1], 1);
7282         let penalty_3;
7283         let mut feerate_3 = 0;
7284         {
7285                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7286                 assert_eq!(node_txn.len(), 1);
7287                 if node_txn[0].input[0].previous_output.txid == revoked_txid {
7288                         assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7289                         assert_eq!(node_txn[0].output.len(), 1);
7290                         check_spends!(node_txn[0], revoked_txn[0]);
7291                         penalty_3 = node_txn[0].txid();
7292                         // Verify the new bumped tx is different from the last claiming transaction; we don't want spurious rebroadcasts
7293                         assert_ne!(penalty_3, penalty_2);
7294                         let fee_3 = penalty_sum - node_txn[0].output[0].value;
7295                         feerate_3 = fee_3 * 1000 / node_txn[0].weight() as u64;
7296                         // Verify 25% bump heuristic
7297                         assert!(feerate_3 * 100 >= feerate_2 * 125);
7298                         node_txn.clear();
7299                 }
7300         }
7301         assert_ne!(feerate_3, 0);
7302
7303         nodes[1].node.get_and_clear_pending_events();
7304         nodes[1].node.get_and_clear_pending_msg_events();
7305 }
7306
7307 #[test]
7308 fn test_bump_penalty_txn_on_revoked_htlcs() {
7309         // If penalty txn have feerates too low to get into mempools, RBF-bump them to make sure we're
7310         // able to claim outputs on revoked HTLC transactions before the timelocks expire.
7311
7312         let mut chanmon_cfgs = create_chanmon_cfgs(2);
7313         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
7314         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7315         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7316         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7317
7318         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7319         // Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps)
7320         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 50).with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
7321         let scorer = test_utils::TestScorer::new();
7322         let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7323         let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(), None,
7324                 3_000_000, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
7325         let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0;
7326         let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 50).with_bolt11_features(nodes[0].node.invoice_features()).unwrap();
7327         let route = get_route(&nodes[1].node.get_our_node_id(), &payment_params, &nodes[1].network_graph.read_only(), None,
7328                 3_000_000, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
7329         send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000);
7330
7331         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
7332         assert_eq!(revoked_local_txn[0].input.len(), 1);
7333         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
7334
7335         // Revoke local commitment tx
7336         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
7337
7338         // B will generate both revoked HTLC-timeout and HTLC-preimage txn from the revoked commitment tx
7339         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()]));
7340         check_closed_broadcast!(nodes[1], true);
7341         check_added_monitors!(nodes[1], 1);
7342         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
7343         connect_blocks(&nodes[1], 50); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
7344
7345         let revoked_htlc_txn = {
7346                 let txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
7347                 assert_eq!(txn.len(), 2);
7348
7349                 assert_eq!(txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
7350                 assert_eq!(txn[0].input.len(), 1);
7351                 check_spends!(txn[0], revoked_local_txn[0]);
7352
7353                 assert_eq!(txn[1].input.len(), 1);
7354                 assert_eq!(txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
7355                 assert_eq!(txn[1].output.len(), 1);
7356                 check_spends!(txn[1], revoked_local_txn[0]);
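                // The witness-script lengths identify the outputs being spent: txn[0] spends the HTLC
                // nodes[1] accepted (received), while txn[1] spends the HTLC it offered.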
7357
7358                 txn
7359         };
7360
7361         // Broadcast set of revoked txn on A
7362         let hash_128 = connect_blocks(&nodes[0], 40);
7363         let block_11 = create_dummy_block(hash_128, 42, vec![revoked_local_txn[0].clone()]);
7364         connect_block(&nodes[0], &block_11);
7365         let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]);
7366         connect_block(&nodes[0], &block_129);
7367         let events = nodes[0].node.get_and_clear_pending_events();
7368         expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
7369         match events.last().unwrap() {
7370                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
7371                 _ => panic!("Unexpected event"),
7372         }
7373         let first;
7374         let feerate_1;
7375         let penalty_txn;
7376         {
7377                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7378                 assert_eq!(node_txn.len(), 4); // 3 penalty txn on revoked commitment tx + 1 penalty txn on revoked HTLC txn
7379                 // Verify claim tx are spending revoked HTLC txn
7380
7381                 // node_txn 0-2 each spend a separate revoked output from revoked_local_txn[0]
7382                 // Note that node_txn[0] and node_txn[1] are bogus - they double spend the revoked_htlc_txn
7383                 // which are included in the same block (they are broadcasted because we scan the
7384                 // transactions linearly and generate claims as we go, they likely should be removed in the
7385                 // future).
7386                 assert_eq!(node_txn[0].input.len(), 1);
7387                 check_spends!(node_txn[0], revoked_local_txn[0]);
7388                 assert_eq!(node_txn[1].input.len(), 1);
7389                 check_spends!(node_txn[1], revoked_local_txn[0]);
7390                 assert_eq!(node_txn[2].input.len(), 1);
7391                 check_spends!(node_txn[2], revoked_local_txn[0]);
7392
7393                 // Each of the three justice transactions claim a separate (single) output of the three
7394                 // available, which we check here:
7395                 assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
7396                 assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
7397                 assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
7398
7399                 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output);
7400                 assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
7401
7402                 // node_txn[3] spends the revoked outputs from the revoked_htlc_txn (which only have one
7403                 // output, checked above).
7404                 assert_eq!(node_txn[3].input.len(), 2);
7405                 assert_eq!(node_txn[3].output.len(), 1);
7406                 check_spends!(node_txn[3], revoked_htlc_txn[0], revoked_htlc_txn[1]);
7407
7408                 first = node_txn[3].txid();
7409                 // Store both feerates for later comparison
7410                 let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[3].output[0].value;
7411                 feerate_1 = fee_1 * 1000 / node_txn[3].weight() as u64;
7412                 penalty_txn = vec![node_txn[2].clone()];
7413                 node_txn.clear();
7414         }
7415
7416         // Connect one more block to see if bumped penalty txn are issued for HTLC txn
7417         let block_130 = create_dummy_block(block_129.block_hash(), 42, penalty_txn);
7418         connect_block(&nodes[0], &block_130);
7419         let block_131 = create_dummy_block(block_130.block_hash(), 42, Vec::new());
7420         connect_block(&nodes[0], &block_131);
7421
7422         // Few more blocks to confirm penalty txn
7423         connect_blocks(&nodes[0], 4);
7424         assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
7425         let header_144 = connect_blocks(&nodes[0], 9);
7426         let node_txn = {
7427                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7428                 assert_eq!(node_txn.len(), 1);
7429
7430                 assert_eq!(node_txn[0].input.len(), 2);
7431                 check_spends!(node_txn[0], revoked_htlc_txn[0], revoked_htlc_txn[1]);
7432                 // Verify bumped tx is different and 25% bump heuristic
7433                 assert_ne!(first, node_txn[0].txid());
7434                 let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[0].output[0].value;
7435                 let feerate_2 = fee_2 * 1000 / node_txn[0].weight() as u64;
7436                 assert!(feerate_2 * 100 > feerate_1 * 125);
7437                 let txn = vec![node_txn[0].clone()];
7438                 node_txn.clear();
7439                 txn
7440         };
7441         // Broadcast claim txn and confirm blocks to avoid further bumps on these outputs
7442         connect_block(&nodes[0], &create_dummy_block(header_144, 42, node_txn));
7443         connect_blocks(&nodes[0], 20);
7444         {
7445                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7446                 // We verify that no new transaction has been broadcast. Previously we had a bug here (see
7447                 // #411): we didn't track remote HTLC outputs for monitoring, so we never noticed when a
7448                 // justice tx spent them, and bumped justice txn kept being generated forever instead of
7449                 // being cleaned up once the claim confirmed and ANTI_REORG_DELAY blocks had passed.
7450                 // Spending the revoked HTLC outputs via a claiming transaction should remove the
7451                 // corresponding claim request as expected and dry up bumped justice generation.
7452                 assert_eq!(node_txn.len(), 0);
7453                 node_txn.clear();
7454         }
7455         check_closed_broadcast!(nodes[0], true);
7456         check_added_monitors!(nodes[0], 1);
7457 }
7458
7459 #[test]
7460 fn test_bump_penalty_txn_on_remote_commitment() {
7461         // If claim txn have feerates too low to get into mempools, RBF-bump them to make sure we're able
7462         // to claim outputs on the remote commitment transaction before the timelocks expire.
7463
7464         // Create 2 HTLCs
7465         // Provide preimage for one
7466         // Check aggregation
7467
7468         let chanmon_cfgs = create_chanmon_cfgs(2);
7469         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7470         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7471         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7472
7473         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7474         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
7475         route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
7476
7477         // Remote commitment txn with 4 outputs: to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
7478         let remote_txn = get_local_commitment_txn!(nodes[0], chan.2);
7479         assert_eq!(remote_txn[0].output.len(), 4);
7480         assert_eq!(remote_txn[0].input.len(), 1);
7481         assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());
7482
7483         // Claim a HTLC without revocation (provide B monitor with preimage)
7484         nodes[1].node.claim_funds(payment_preimage);
7485         expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
7486         mine_transaction(&nodes[1], &remote_txn[0]);
7487         check_added_monitors!(nodes[1], 2);
7488         connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
7489
7490         // One or more claim tx should have been broadcast, check it
7491         let timeout;
7492         let preimage;
7493         let preimage_bump;
7494         let feerate_timeout;
7495         let feerate_preimage;
7496         {
7497                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7498                 // 3 transactions including:
7499                 //   preimage and timeout sweeps from remote commitment + preimage sweep bump
7500                 assert_eq!(node_txn.len(), 3);
7501                 assert_eq!(node_txn[0].input.len(), 1);
7502                 assert_eq!(node_txn[1].input.len(), 1);
7503                 assert_eq!(node_txn[2].input.len(), 1);
7504                 check_spends!(node_txn[0], remote_txn[0]);
7505                 check_spends!(node_txn[1], remote_txn[0]);
7506                 check_spends!(node_txn[2], remote_txn[0]);
7507
7508                 preimage = node_txn[0].txid();
7509                 let index = node_txn[0].input[0].previous_output.vout;
7510                 let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
7511                 feerate_preimage = fee * 1000 / node_txn[0].weight() as u64;
7512
7513                 let (preimage_bump_tx, timeout_tx) = if node_txn[2].input[0].previous_output == node_txn[0].input[0].previous_output {
7514                         (node_txn[2].clone(), node_txn[1].clone())
7515                 } else {
7516                         (node_txn[1].clone(), node_txn[2].clone())
7517                 };
7518
7519                 preimage_bump = preimage_bump_tx;
7520                 check_spends!(preimage_bump, remote_txn[0]);
7521                 assert_eq!(node_txn[0].input[0].previous_output, preimage_bump.input[0].previous_output);
7522
7523                 timeout = timeout_tx.txid();
7524                 let index = timeout_tx.input[0].previous_output.vout;
7525                 let fee = remote_txn[0].output[index as usize].value - timeout_tx.output[0].value;
7526                 feerate_timeout = fee * 1000 / timeout_tx.weight() as u64;
7527
7528                 node_txn.clear();
7529         };
7530         assert_ne!(feerate_timeout, 0);
7531         assert_ne!(feerate_preimage, 0);
7532
7533         // After exhaustion of height timer, new bumped claim txn should have been broadcast, check it
7534         connect_blocks(&nodes[1], 1);
7535         {
7536                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7537                 assert_eq!(node_txn.len(), 1);
7538                 assert_eq!(node_txn[0].input.len(), 1);
7539                 assert_eq!(preimage_bump.input.len(), 1);
7540                 check_spends!(node_txn[0], remote_txn[0]);
7541                 check_spends!(preimage_bump, remote_txn[0]);
7542
7543                 let index = preimage_bump.input[0].previous_output.vout;
7544                 let fee = remote_txn[0].output[index as usize].value - preimage_bump.output[0].value;
7545                 let new_feerate = fee * 1000 / preimage_bump.weight() as u64;
7546                 assert!(new_feerate * 100 > feerate_timeout * 125);
7547                 assert_ne!(timeout, preimage_bump.txid());
7548
7549                 let index = node_txn[0].input[0].previous_output.vout;
7550                 let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
7551                 let new_feerate = fee * 1000 / node_txn[0].weight() as u64;
7552                 assert!(new_feerate * 100 > feerate_preimage * 125);
7553                 assert_ne!(preimage, node_txn[0].txid());
7554
7555                 node_txn.clear();
7556         }
7557
7558         nodes[1].node.get_and_clear_pending_events();
7559         nodes[1].node.get_and_clear_pending_msg_events();
7560 }
7561
7562 #[test]
7563 fn test_counterparty_raa_skip_no_crash() {
7564         // Previously, if our counterparty sent two RAAs in a row without us having provided a
7565         // commitment transaction, we would have happily carried on and provided them the next
7566         // commitment transaction based on one RAA forward. This would probably eventually have led to
7567         // channel closure, but it would not have resulted in funds loss. Still, our
7568         // EnforcingSigner would have panicked as it doesn't like jumps into the future. Here, we
7569         // check simply that the channel is closed in response to such an RAA, but don't check whether
7570         // we decide to punish our counterparty for revoking their funds (as we don't currently
7571         // implement that).
7572         let chanmon_cfgs = create_chanmon_cfgs(2);
7573         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7574         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7575         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7576         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
7577
7578         let per_commitment_secret;
7579         let next_per_commitment_point;
7580         {
7581                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
7582                 let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
7583                 let keys = guard.channel_by_id.get_mut(&channel_id).unwrap().get_signer();
7584
7585                 const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
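                // Commitment numbers count down from 2^48 - 1 (per BOLT 3), so INITIAL_COMMITMENT_NUMBER
                // is the number of the first commitment transaction.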
7586
7587                 // Make signer believe we got a counterparty signature, so that it allows the revocation
7588                 keys.get_enforcement_state().last_holder_commitment -= 1;
7589                 per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
7590
7591                 // Must revoke without gaps
7592                 keys.get_enforcement_state().last_holder_commitment -= 1;
7593                 keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
7594
7595                 keys.get_enforcement_state().last_holder_commitment -= 1;
7596                 next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
7597                         &SecretKey::from_slice(&keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
7598         }
7599
7600         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
7601                 &msgs::RevokeAndACK {
7602                         channel_id,
7603                         per_commitment_secret,
7604                         next_per_commitment_point,
7605                         #[cfg(taproot)]
7606                         next_local_nonce: None,
7607                 });
7608         assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
7609         check_added_monitors!(nodes[1], 1);
7610         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() });
7611 }
7612
7613 #[test]
7614 fn test_bump_txn_sanitize_tracking_maps() {
7615         // Sanitizing pending_claim_requests and claimable_outpoints used to be buggy;
7616         // verify we clean them right after ANTI_REORG_DELAY expires.
7617
7618         let chanmon_cfgs = create_chanmon_cfgs(2);
7619         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7620         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7621         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7622
7623         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7624         // Lock HTLC in both directions
7625         let (payment_preimage_1, _, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
7626         let (_, payment_hash_2, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
7627
7628         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
7629         assert_eq!(revoked_local_txn[0].input.len(), 1);
7630         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
7631
7632         // Revoke local commitment tx
7633         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
7634
7635         // Broadcast set of revoked txn on A
7636         connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH);
7637         expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
7638         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
7639
7640         mine_transaction(&nodes[0], &revoked_local_txn[0]);
7641         check_closed_broadcast!(nodes[0], true);
7642         check_added_monitors!(nodes[0], 1);
7643         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
7644         let penalty_txn = {
7645                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7646                 assert_eq!(node_txn.len(), 3); //ChannelMonitor: justice txn * 3
7647                 check_spends!(node_txn[0], revoked_local_txn[0]);
7648                 check_spends!(node_txn[1], revoked_local_txn[0]);
7649                 check_spends!(node_txn[2], revoked_local_txn[0]);
7650                 let penalty_txn = vec![node_txn[0].clone(), node_txn[1].clone(), node_txn[2].clone()];
7651                 node_txn.clear();
7652                 penalty_txn
7653         };
7654         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, penalty_txn));
7655         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
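        // Once the claims have been confirmed for ANTI_REORG_DELAY blocks, the monitor should have dropped
        // its tracking state for them, which we check below.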
7656         {
7657                 let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(OutPoint { txid: chan.3.txid(), index: 0 }).unwrap();
7658                 assert!(monitor.inner.lock().unwrap().onchain_tx_handler.pending_claim_requests.is_empty());
7659                 assert!(monitor.inner.lock().unwrap().onchain_tx_handler.claimable_outpoints.is_empty());
7660         }
7661 }
7662
7663 #[test]
7664 fn test_channel_conf_timeout() {
7665         // Tests that, for inbound channels, we give up on them if the funding transaction does not
7666         // confirm within 2016 blocks, as recommended by BOLT 2.
7667         let chanmon_cfgs = create_chanmon_cfgs(2);
7668         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7669         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7670         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7671
7672         let _funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 100_000);
7673
7674         // The outbound node should wait forever for confirmation:
7675         // This matches `channel::FUNDING_CONF_DEADLINE_BLOCKS` and BOLT 2's suggested timeout, thus is
7676         // copied here instead of directly referencing the constant.
7677         connect_blocks(&nodes[0], 2016);
7678         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7679
7680         // The inbound node should fail the channel after exactly 2016 blocks
7681         connect_blocks(&nodes[1], 2015);
7682         check_added_monitors!(nodes[1], 0);
7683         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7684
7685         connect_blocks(&nodes[1], 1);
7686         check_added_monitors!(nodes[1], 1);
7687         check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut);
7688         let close_ev = nodes[1].node.get_and_clear_pending_msg_events();
7689         assert_eq!(close_ev.len(), 1);
7690         match close_ev[0] {
7691                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, ref node_id } => {
7692                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
7693                         assert_eq!(msg.data, "Channel closed because funding transaction failed to confirm within 2016 blocks");
7694                 },
7695                 _ => panic!("Unexpected event"),
7696         }
7697 }
7698
7699 #[test]
7700 fn test_override_channel_config() {
7701         let chanmon_cfgs = create_chanmon_cfgs(2);
7702         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7703         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7704         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7705
7706         // Node0 initiates a channel to node1 using the override config.
7707         let mut override_config = UserConfig::default();
7708         override_config.channel_handshake_config.our_to_self_delay = 200;
7709
7710         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, Some(override_config)).unwrap();
7711
7712         // Assert the channel created by node0 is using the override config.
7713         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7714         assert_eq!(res.channel_flags, 0);
7715         assert_eq!(res.to_self_delay, 200);
7716 }
7717
7718 #[test]
7719 fn test_override_0msat_htlc_minimum() {
7720         let mut zero_config = UserConfig::default();
7721         zero_config.channel_handshake_config.our_htlc_minimum_msat = 0;
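        // A configured htlc_minimum_msat of 0 is expected to be bumped to 1 msat in both open_channel and
        // accept_channel, since offered HTLCs must carry a non-zero amount.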
7722         let chanmon_cfgs = create_chanmon_cfgs(2);
7723         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7724         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(zero_config.clone())]);
7725         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7726
7727         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, Some(zero_config)).unwrap();
7728         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7729         assert_eq!(res.htlc_minimum_msat, 1);
7730
7731         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
7732         let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
7733         assert_eq!(res.htlc_minimum_msat, 1);
7734 }
7735
7736 #[test]
7737 fn test_channel_update_has_correct_htlc_maximum_msat() {
7738         // Tests that the `ChannelUpdate` message has the correct values for `htlc_maximum_msat` set.
7739         // BOLT 7 specifies that, if present, `htlc_maximum_msat`:
7740         // 1. MUST be set to less than or equal to the channel capacity. In LDK, this is capped to
7741         // 90% of the `channel_value`.
7742         // 2. MUST be set to less than or equal to the `max_htlc_value_in_flight_msat` received from the peer.
7743
7744         let mut config_30_percent = UserConfig::default();
7745         config_30_percent.channel_handshake_config.announced_channel = true;
7746         config_30_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 30;
7747         let mut config_50_percent = UserConfig::default();
7748         config_50_percent.channel_handshake_config.announced_channel = true;
7749         config_50_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50;
7750         let mut config_95_percent = UserConfig::default();
7751         config_95_percent.channel_handshake_config.announced_channel = true;
7752         config_95_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 95;
7753         let mut config_100_percent = UserConfig::default();
7754         config_100_percent.channel_handshake_config.announced_channel = true;
7755         config_100_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
7756
7757         let chanmon_cfgs = create_chanmon_cfgs(4);
7758         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
7759         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[Some(config_30_percent), Some(config_50_percent), Some(config_95_percent), Some(config_100_percent)]);
7760         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
7761
7762         let channel_value_satoshis = 100000;
7763         let channel_value_msat = channel_value_satoshis * 1000;
7764         let channel_value_30_percent_msat = (channel_value_msat as f64 * 0.3) as u64;
7765         let channel_value_50_percent_msat = (channel_value_msat as f64 * 0.5) as u64;
7766         let channel_value_90_percent_msat = (channel_value_msat as f64 * 0.9) as u64;
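        // With a 100_000 sat (100_000_000 msat) channel, these come out to 30_000_000, 50_000_000 and
        // 90_000_000 msat respectively.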
7767
7768         let (node_0_chan_update, node_1_chan_update, _, _)  = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value_satoshis, 10001);
7769         let (node_2_chan_update, node_3_chan_update, _, _)  = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, channel_value_satoshis, 10001);
7770
7771         // Assert that `node[0]`'s `ChannelUpdate` is capped at 50 percent of the `channel_value`, as
7772         // that's the value of `node[1]`'s `holder_max_htlc_value_in_flight_msat`.
7773         assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, channel_value_50_percent_msat);
7774         // Assert that `node[1]`'s `ChannelUpdate` is capped at 30 percent of the `channel_value`, as
7775         // that's the value of `node[0]`'s `holder_max_htlc_value_in_flight_msat`.
7776         assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, channel_value_30_percent_msat);
7777
7778         // Assert that `node[2]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
7779         // the value of `node[3]`'s `holder_max_htlc_value_in_flight_msat` (100%), exceeds 90% of the
7780         // `channel_value`.
7781         assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
7782         // Assert that `node[3]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
7783         // the value of `node[2]`'s `holder_max_htlc_value_in_flight_msat` (95%), exceeds 90% of the
7784         // `channel_value`.
7785         assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
7786 }
7787
7788 #[test]
7789 fn test_manually_accept_inbound_channel_request() {
7790         let mut manually_accept_conf = UserConfig::default();
7791         manually_accept_conf.manually_accept_inbound_channels = true;
7792         let chanmon_cfgs = create_chanmon_cfgs(2);
7793         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7794         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
7795         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7796
7797         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
7798         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7799
7800         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
7801
7802         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
7803         // accepting the inbound channel request.
7804         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
7805
7806         let events = nodes[1].node.get_and_clear_pending_events();
7807         match events[0] {
7808                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
7809                         nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23).unwrap();
7810                 }
7811                 _ => panic!("Unexpected event"),
7812         }
7813
7814         let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
7815         assert_eq!(accept_msg_ev.len(), 1);
7816
7817         match accept_msg_ev[0] {
7818                 MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
7819                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
7820                 }
7821                 _ => panic!("Unexpected event"),
7822         }
7823
7824         nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
7825
7826         let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
7827         assert_eq!(close_msg_ev.len(), 1);
7828
7829         let events = nodes[1].node.get_and_clear_pending_events();
7830         match events[0] {
7831                 Event::ChannelClosed { user_channel_id, .. } => {
7832                         assert_eq!(user_channel_id, 23);
7833                 }
7834                 _ => panic!("Unexpected event"),
7835         }
7836 }
7837
7838 #[test]
7839 fn test_manually_reject_inbound_channel_request() {
7840         let mut manually_accept_conf = UserConfig::default();
7841         manually_accept_conf.manually_accept_inbound_channels = true;
7842         let chanmon_cfgs = create_chanmon_cfgs(2);
7843         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7844         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
7845         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7846
7847         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
7848         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7849
7850         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
7851
7852         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
7853         // rejecting the inbound channel request.
7854         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
7855
7856         let events = nodes[1].node.get_and_clear_pending_events();
7857         match events[0] {
7858                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
7859                         nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
7860                 }
7861                 _ => panic!("Unexpected event"),
7862         }
7863
7864         let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
7865         assert_eq!(close_msg_ev.len(), 1);
7866
7867         match close_msg_ev[0] {
7868                 MessageSendEvent::HandleError { ref node_id, .. } => {
7869                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
7870                 }
7871                 _ => panic!("Unexpected event"),
7872         }
7873         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
7874 }
7875
7876 #[test]
7877 fn test_reject_funding_before_inbound_channel_accepted() {
7878         // This tests that when `UserConfig::manually_accept_inbound_channels` is set to true, inbound
7879         // channels must be manually accepted through `ChannelManager::accept_inbound_channel` by
7880         // the node operator before the counterparty sends a `FundingCreated` message. If a
7881         // `FundingCreated` message is received before the channel is accepted, it should be rejected
7882         // and the channel should be closed.
7883         let mut manually_accept_conf = UserConfig::default();
7884         manually_accept_conf.manually_accept_inbound_channels = true;
7885         let chanmon_cfgs = create_chanmon_cfgs(2);
7886         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7887         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
7888         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7889
7890         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
7891         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7892         let temp_channel_id = res.temporary_channel_id;
7893
7894         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
7895
7896         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in the `msg_events`.
7897         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
7898
7899         // Clear the `Event::OpenChannelRequest` event without responding to the request.
7900         nodes[1].node.get_and_clear_pending_events();
7901
7902         // Get the `AcceptChannel` message of `nodes[1]` without calling
7903         // `ChannelManager::accept_inbound_channel`, which generates a
7904         // `MessageSendEvent::SendAcceptChannel` event. The message is passed to `nodes[0]`'s
7905         // `handle_accept_channel`, which is required for the subsequent `create_funding_transaction`
7906         // call on `nodes[0]` to succeed.
7907         let accept_chan_msg = {
7908                 let mut node_1_per_peer_lock;
7909                 let mut node_1_peer_state_lock;
7910                 let channel =  get_inbound_v1_channel_ref!(&nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, temp_channel_id);
7911                 channel.get_accept_channel_message()
7912         };
7913         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
7914
7915         let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
7916
7917         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
7918         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
7919
7920         // The `funding_created_msg` should be rejected by `nodes[1]` as it hasn't accepted the channel
7921         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
7922
7923         let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
7924         assert_eq!(close_msg_ev.len(), 1);
7925
7926         let expected_err = "FundingCreated message received before the channel was accepted";
7927         match close_msg_ev[0] {
7928                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, ref node_id, } => {
7929                         assert_eq!(msg.channel_id, temp_channel_id);
7930                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
7931                         assert_eq!(msg.data, expected_err);
7932                 }
7933                 _ => panic!("Unexpected event"),
7934         }
7935
7936         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() });
7937 }
7938
7939 #[test]
7940 fn test_can_not_accept_inbound_channel_twice() {
7941         let mut manually_accept_conf = UserConfig::default();
7942         manually_accept_conf.manually_accept_inbound_channels = true;
7943         let chanmon_cfgs = create_chanmon_cfgs(2);
7944         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7945         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
7946         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7947
7948         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
7949         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7950
7951         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
7952
7953         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
7954         // accepting the inbound channel request.
7955         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
7956
7957         let events = nodes[1].node.get_and_clear_pending_events();
7958         match events[0] {
7959                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
7960                         nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
7961                         let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0);
7962                         match api_res {
7963                                 Err(APIError::APIMisuseError { err }) => {
7964                                         assert_eq!(err, "The channel isn't currently awaiting to be accepted.");
7965                                 },
7966                                 Ok(_) => panic!("Channel shouldn't be possible to be accepted twice"),
7967                                 Err(_) => panic!("Unexpected Error"),
7968                         }
7969                 }
7970                 _ => panic!("Unexpected event"),
7971         }
7972
7973         // Ensure that the channel wasn't closed after attempting to accept it twice.
7974         let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
7975         assert_eq!(accept_msg_ev.len(), 1);
7976
7977         match accept_msg_ev[0] {
7978                 MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
7979                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
7980                 }
7981                 _ => panic!("Unexpected event"),
7982         }
7983 }
7984
7985 #[test]
7986 fn test_can_not_accept_unknown_inbound_channel() {
7987         let chanmon_cfg = create_chanmon_cfgs(2);
7988         let node_cfg = create_node_cfgs(2, &chanmon_cfg);
7989         let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
7990         let nodes = create_network(2, &node_cfg, &node_chanmgr);
7991
7992         let unknown_channel_id = [0; 32];
7993         let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0);
7994         match api_res {
7995                 Err(APIError::ChannelUnavailable { err }) => {
7996                         assert_eq!(err, format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(unknown_channel_id), nodes[1].node.get_our_node_id()));
7997                 },
7998                 Ok(_) => panic!("It shouldn't be possible to accept an unknown channel"),
7999                 Err(_) => panic!("Unexpected Error"),
8000         }
8001 }
8002
8003 #[test]
8004 fn test_onion_value_mpp_set_calculation() {
8005         // Test that we use the onion value `amt_to_forward` when
8006         // calculating whether we've reached the `total_msat` of an MPP
8007         // by having a routing node forward more than `amt_to_forward`
8008         // and checking that the receiving node doesn't generate
8009         // a PaymentClaimable event too early
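        // Concretely, the first part's HTLC delivers 100_000 msat but its onion payload claims only
        // 99_000, so the recipient should not consider the 100_000 msat `total_msat` reached until the
        // second, 1_000 msat, part arrives.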
8010         let node_count = 4;
8011         let chanmon_cfgs = create_chanmon_cfgs(node_count);
8012         let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
8013         let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
8014         let mut nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
8015
8016         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8017         let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
8018         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
8019         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
8020
8021         let total_msat = 100_000;
8022         let expected_paths: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]];
8023         let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], total_msat);
8024         let sample_path = route.paths.pop().unwrap();
8025
8026         let mut path_1 = sample_path.clone();
8027         path_1.hops[0].pubkey = nodes[1].node.get_our_node_id();
8028         path_1.hops[0].short_channel_id = chan_1_id;
8029         path_1.hops[1].pubkey = nodes[3].node.get_our_node_id();
8030         path_1.hops[1].short_channel_id = chan_3_id;
8031         path_1.hops[1].fee_msat = 100_000;
8032         route.paths.push(path_1);
8033
8034         let mut path_2 = sample_path.clone();
8035         path_2.hops[0].pubkey = nodes[2].node.get_our_node_id();
8036         path_2.hops[0].short_channel_id = chan_2_id;
8037         path_2.hops[1].pubkey = nodes[3].node.get_our_node_id();
8038         path_2.hops[1].short_channel_id = chan_4_id;
8039         path_2.hops[1].fee_msat = 1_000;
8040         route.paths.push(path_2);
8041
8042         // Send payment
8043         let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
8044         let onion_session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
8045                 RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap();
8046         nodes[0].node.test_send_payment_internal(&route, our_payment_hash,
8047                 RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
8048         check_added_monitors!(nodes[0], expected_paths.len());
8049
8050         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8051         assert_eq!(events.len(), expected_paths.len());
8052
8053         // First path
8054         let ev = remove_first_msg_event_to_node(&expected_paths[0][0].node.get_our_node_id(), &mut events);
8055         let mut payment_event = SendEvent::from_event(ev);
8056         let mut prev_node = &nodes[0];
8057
8058         for (idx, &node) in expected_paths[0].iter().enumerate() {
8059                 assert_eq!(node.node.get_our_node_id(), payment_event.node_id);
8060
8061                 if idx == 0 { // routing node
8062                         let session_priv = [3; 32];
8063                         let height = nodes[0].best_block_info().1;
8064                         let session_priv = SecretKey::from_slice(&session_priv).unwrap();
8065                         let mut onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
8066                         let (mut onion_payloads, _, _) = onion_utils::build_onion_payloads(&route.paths[0], 100_000,
8067                                 RecipientOnionFields::secret_only(our_payment_secret), height + 1, &None).unwrap();
8068                         // Edit amt_to_forward to simulate the sender having set
8069                         // the final amount and the routing node taking less fee
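                        // The routing node will still forward the full 100_000 msat on this path. If the
                        // recipient summed the HTLC amounts actually received rather than the onion's
                        // 99_000 msat, this first path alone would reach total_msat and trigger
                        // PaymentClaimable too early.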
8070                         onion_payloads[1].amt_to_forward = 99_000;
8071                         let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
8072                         payment_event.msgs[0].onion_routing_packet = new_onion_packet;
8073                 }
8074
8075                 node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]);
8076                 check_added_monitors!(node, 0);
8077                 commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
8078                 expect_pending_htlcs_forwardable!(node);
8079
8080                 if idx == 0 {
8081                         let mut events_2 = node.node.get_and_clear_pending_msg_events();
8082                         assert_eq!(events_2.len(), 1);
8083                         check_added_monitors!(node, 1);
8084                         payment_event = SendEvent::from_event(events_2.remove(0));
8085                         assert_eq!(payment_event.msgs.len(), 1);
8086                 } else {
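                        // Final node on the first path: per the onion it has only seen 99_000 of the
                        // 100_000 msat total, so it must not generate a PaymentClaimable event yet.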
8087                         let events_2 = node.node.get_and_clear_pending_events();
8088                         assert!(events_2.is_empty());
8089                 }
8090
8091                 prev_node = node;
8092         }
8093
8094         // Second path
8095         let ev = remove_first_msg_event_to_node(&expected_paths[1][0].node.get_our_node_id(), &mut events);
8096         pass_along_path(&nodes[0], expected_paths[1], 101_000, our_payment_hash.clone(), Some(our_payment_secret), ev, true, None);
8097
8098         claim_payment_along_route(&nodes[0], expected_paths, false, our_payment_preimage);
8099 }
8100
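// Tests an MPP overshoot: the individual parts sum to more than `total_msat`. The recipient should
// still accept the payment, which becomes claimable once at least `total_msat` has arrived.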
8101 fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) {
8102
8103         let routing_node_count = msat_amounts.len();
8104         let node_count = routing_node_count + 2;
8105
8106         let chanmon_cfgs = create_chanmon_cfgs(node_count);
8107         let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
8108         let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
8109         let nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
8110
8111         let src_idx = 0;
8112         let dst_idx = 1;
8113
8114         // Create channels for each amount
8115         let mut expected_paths = Vec::with_capacity(routing_node_count);
8116         let mut src_chan_ids = Vec::with_capacity(routing_node_count);
8117         let mut dst_chan_ids = Vec::with_capacity(routing_node_count);
8118         for i in 0..routing_node_count {
8119                 let routing_node = 2 + i;
8120                 let src_chan_id = create_announced_chan_between_nodes(&nodes, src_idx, routing_node).0.contents.short_channel_id;
8121                 src_chan_ids.push(src_chan_id);
8122                 let dst_chan_id = create_announced_chan_between_nodes(&nodes, routing_node, dst_idx).0.contents.short_channel_id;
8123                 dst_chan_ids.push(dst_chan_id);
8124                 let path = vec![&nodes[routing_node], &nodes[dst_idx]];
8125                 expected_paths.push(path);
8126         }
8127         let expected_paths: Vec<&[&Node]> = expected_paths.iter().map(|route| route.as_slice()).collect();
8128
8129         // Create a route for each amount
8130         let example_amount = 100000;
8131         let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[src_idx], nodes[dst_idx], example_amount);
8132         let sample_path = route.paths.pop().unwrap();
8133         for i in 0..routing_node_count {
8134                 let routing_node = 2 + i;
8135                 let mut path = sample_path.clone();
8136                 path.hops[0].pubkey = nodes[routing_node].node.get_our_node_id();
8137                 path.hops[0].short_channel_id = src_chan_ids[i];
8138                 path.hops[1].pubkey = nodes[dst_idx].node.get_our_node_id();
8139                 path.hops[1].short_channel_id = dst_chan_ids[i];
8140                 path.hops[1].fee_msat = msat_amounts[i];
8141                 route.paths.push(path);
8142         }
8143
8144         // Send payment with manually set total_msat
8145         let payment_id = PaymentId(nodes[src_idx].keys_manager.backing.get_secure_random_bytes());
8146         let onion_session_privs = nodes[src_idx].node.test_add_new_pending_payment(our_payment_hash,
8147                 RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap();
8148         nodes[src_idx].node.test_send_payment_internal(&route, our_payment_hash,
8149                 RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
8150         check_added_monitors!(nodes[src_idx], expected_paths.len());
8151
8152         let mut events = nodes[src_idx].node.get_and_clear_pending_msg_events();
8153         assert_eq!(events.len(), expected_paths.len());
8154         let mut amount_received = 0;
8155         for (path_idx, expected_path) in expected_paths.iter().enumerate() {
8156                 let ev = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &mut events);
8157
8158                 let current_path_amount = msat_amounts[path_idx];
8159                 amount_received += current_path_amount;
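                // The payment is expected to become claimable exactly on the path whose arrival first
                // pushes the cumulative amount received to (or past) total_msat.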
8160                 let became_claimable_now = amount_received >= total_msat && amount_received - current_path_amount < total_msat;
8161                 pass_along_path(&nodes[src_idx], expected_path, amount_received, our_payment_hash.clone(), Some(our_payment_secret), ev, became_claimable_now, None);
8162         }
8163
8164         claim_payment_along_route(&nodes[src_idx], &expected_paths, false, our_payment_preimage);
8165 }
8166
8167 #[test]
8168 fn test_overshoot_mpp() {
8169         do_test_overshoot_mpp(&[100_000, 101_000], 200_000);
8170         do_test_overshoot_mpp(&[100_000, 10_000, 100_000], 200_000);
8171 }
8172
8173 #[test]
8174 fn test_simple_mpp() {
8175         // Simple test of sending a multi-path payment.
8176         let chanmon_cfgs = create_chanmon_cfgs(4);
8177         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
8178         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
8179         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
8180
8181         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8182         let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
8183         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
8184         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
8185
8186         let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
8187         let path = route.paths[0].clone();
8188         route.paths.push(path);
8189         route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
8190         route.paths[0].hops[0].short_channel_id = chan_1_id;
8191         route.paths[0].hops[1].short_channel_id = chan_3_id;
8192         route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
8193         route.paths[1].hops[0].short_channel_id = chan_2_id;
8194         route.paths[1].hops[1].short_channel_id = chan_4_id;
8195         send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
8196         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
8197 }
8198
8199 #[test]
8200 fn test_preimage_storage() {
8201         // Simple test of payment preimage storage, allowing payments to be claimed with no client-side storage
8202         let chanmon_cfgs = create_chanmon_cfgs(2);
8203         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8204         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8205         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8206
8207         create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8208
8209         {
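                // Register an inbound payment for 100_000 msat expiring in 7200 seconds. No preimage is
                // returned to the caller; it is surfaced later via the PaymentClaimable purpose, so no
                // client-side storage is needed to claim.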
8210                 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200, None).unwrap();
8211                 let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
8212                 nodes[0].node.send_payment_with_route(&route, payment_hash,
8213                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
8214                 check_added_monitors!(nodes[0], 1);
8215                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8216                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
8217                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8218                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8219         }
8220         // Note that after leaving the above scope we have no knowledge of any arguments or return
8221         // values from previous calls.
8222         expect_pending_htlcs_forwardable!(nodes[1]);
8223         let events = nodes[1].node.get_and_clear_pending_events();
8224         assert_eq!(events.len(), 1);
8225         match events[0] {
8226                 Event::PaymentClaimable { ref purpose, .. } => {
8227                         match &purpose {
8228                                 PaymentPurpose::InvoicePayment { payment_preimage, .. } => {
8229                                         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap());
8230                                 },
8231                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
8232                         }
8233                 },
8234                 _ => panic!("Unexpected event"),
8235         }
8236 }
8237
8238 #[test]
8239 fn test_bad_secret_hash() {
8240         // Simple test of unregistered payment hash/invalid payment secret handling
8241         let chanmon_cfgs = create_chanmon_cfgs(2);
8242         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8243         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8244         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8245
8246         create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8247
8248         let random_payment_hash = PaymentHash([42; 32]);
8249         let random_payment_secret = PaymentSecret([43; 32]);
8250         let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2, None).unwrap();
8251         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
8252
8253         // All the below cases should end up being handled exactly identically, so we macro the
8254         // resulting events.
8255         macro_rules! handle_unknown_invalid_payment_data {
8256                 ($payment_hash: expr) => {
8257                         check_added_monitors!(nodes[0], 1);
8258                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8259                         let payment_event = SendEvent::from_event(events.pop().unwrap());
8260                         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8261                         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8262
8263                         // We have to forward pending HTLCs once to process the receipt of the HTLC and then
8264                         // again to process the pending backwards-failure of the HTLC
8265                         expect_pending_htlcs_forwardable!(nodes[1]);
8266                         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment{ payment_hash: $payment_hash }]);
8267                         check_added_monitors!(nodes[1], 1);
8268
8269                         // We should fail the payment back
8270                         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
8271                         match events.pop().unwrap() {
8272                                 MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } } => {
8273                                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
8274                                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
8275                                 },
8276                                 _ => panic!("Unexpected event"),
8277                         }
8278                 }
8279         }
8280
8281         let expected_error_code = 0x4000|15; // incorrect_or_unknown_payment_details
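        // 0x4000 is the PERM failure flag; 15 is the BOLT 4 incorrect_or_unknown_payment_details code.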
8282         // Error data is the HTLC value (100,000) and current block height
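        // encoded as an 8-byte big-endian amount (100,000 = 0x0186a0) followed by a 4-byte big-endian height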
8283         let expected_error_data = [0, 0, 0, 0, 0, 1, 0x86, 0xa0, 0, 0, 0, CHAN_CONFIRM_DEPTH as u8];
8284
8285         // Send a payment with the right payment hash but the wrong payment secret
8286         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
8287                 RecipientOnionFields::secret_only(random_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
8288         handle_unknown_invalid_payment_data!(our_payment_hash);
8289         expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data);
8290
8291         // Send a payment with a random payment hash, but the right payment secret
8292         nodes[0].node.send_payment_with_route(&route, random_payment_hash,
8293                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(random_payment_hash.0)).unwrap();
8294         handle_unknown_invalid_payment_data!(random_payment_hash);
8295         expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8296
8297         // Send a payment with a random payment hash and random payment secret
8298         nodes[0].node.send_payment_with_route(&route, random_payment_hash,
8299                 RecipientOnionFields::secret_only(random_payment_secret), PaymentId(random_payment_hash.0)).unwrap();
8300         handle_unknown_invalid_payment_data!(random_payment_hash);
8301         expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8302 }
8303
8304 #[test]
8305 fn test_update_err_monitor_lockdown() {
8306         // Our monitor will lock updates of the local commitment transaction if a broadcast condition
8307         // has been fulfilled (either a force-close from Channel or a block height requiring an HTLC-
8308         // timeout). Trying to update the monitor after lockdown should return a ChannelMonitorUpdateStatus
8309         // error.
8310         //
8311         // This scenario may happen in a watchtower setup, where the watchtower processes a block height
8312         // triggering a timeout while a slow-block-processing ChannelManager receives a locally-signed
8313         // commitment at the same time.
8314
8315         let chanmon_cfgs = create_chanmon_cfgs(2);
8316         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8317         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8318         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8319
8320         // Create some initial channel
8321         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
8322         let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
8323
8324         // Rebalance the network to generate an HTLC in both directions
8325         send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
8326
8327         // Route a HTLC from node 0 to node 1 (but don't settle)
8328         let (preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
8329
8330         // Copy ChainMonitor to simulate a watchtower and advance node 0's block height until its ChannelMonitor times out the HTLC onchain
8331         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8332         let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
8333         let persister = test_utils::TestPersister::new();
8334         let watchtower = {
8335                 let new_monitor = {
8336                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8337                         let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
8338                                         &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8339                         assert!(new_monitor == *monitor);
8340                         new_monitor
8341                 };
8342                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8343                 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
8344                 watchtower
8345         };
8346         let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
8347         // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
8348         // transaction lock time requirements here.
8349         chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (block.clone(), 200));
8350         watchtower.chain_monitor.block_connected(&block, 200);
8351
8352         // Try to update ChannelMonitor
8353         nodes[1].node.claim_funds(preimage);
8354         check_added_monitors!(nodes[1], 1);
8355         expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
8356
8357         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8358         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
8359         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
8360         {
8361                 let mut node_0_per_peer_lock;
8362                 let mut node_0_peer_state_lock;
8363                 let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
8364                 if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
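                        // The watchtower copy has already hit its broadcast condition via the connected
                        // block, so it locks down and rejects the update, while our in-sync local monitor
                        // accepts it.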
8365                         assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
8366                         assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8367                 } else { assert!(false); }
8368         }
8369         // Our local monitor is in-sync and hasn't yet processed the timeout
8370         check_added_monitors!(nodes[0], 1);
8371         let events = nodes[0].node.get_and_clear_pending_events();
8372         assert_eq!(events.len(), 1);
8373 }
8374
8375 #[test]
8376 fn test_concurrent_monitor_claim() {
8377         // Watchtower Alice receives a block and broadcasts state N. The channel then receives a new state
8378         // N+1 and sends it to both watchtowers. Bob accepts N+1 and later receives a block, broadcasting
8379         // the latest state N+1. Alice rejects state N+1, but Bob has already broadcast it and state N+1
8380         // confirms. Alice claims the output from state N+1.
8381
8382         let chanmon_cfgs = create_chanmon_cfgs(2);
8383         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8384         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8385         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8386
8387         // Create some initial channel
8388         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
8389         let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
8390
8391         // Rebalance the network to generate htlc in the two directions
8392         // Rebalance the network to generate an HTLC in both directions
8393
8394         // Route a HTLC from node 0 to node 1 (but don't settle)
8395         route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
8396
8397         // Copy ChainMonitor to simulate watchtower Alice and advance the block height until her ChannelMonitor times out the HTLC onchain
8398         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8399         let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
8400         let persister = test_utils::TestPersister::new();
8401         let alice_broadcaster = test_utils::TestBroadcaster::with_blocks(
8402                 Arc::new(Mutex::new(nodes[0].blocks.lock().unwrap().clone())),
8403         );
8404         let watchtower_alice = {
8405                 let new_monitor = {
8406                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8407                         let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
8408                                         &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8409                         assert!(new_monitor == *monitor);
8410                         new_monitor
8411                 };
8412                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &alice_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8413                 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
8414                 watchtower
8415         };
8416         let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
8417         // Make Alice aware of enough blocks that it doesn't think we're violating transaction lock time
8418         // requirements here.
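        // Block height at which the watchtower's monitor is expected to go on-chain with the holder
        // commitment and HTLC-timeout for the pending HTLC.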
8419         const HTLC_TIMEOUT_BROADCAST: u32 = CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS;
8420         alice_broadcaster.blocks.lock().unwrap().resize((HTLC_TIMEOUT_BROADCAST) as usize, (block.clone(), HTLC_TIMEOUT_BROADCAST));
8421         watchtower_alice.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST);
8422
8423         // Watchtower Alice should have broadcast a commitment/HTLC-timeout
8424         let alice_state = {
8425                 let mut txn = alice_broadcaster.txn_broadcast();
8426                 assert_eq!(txn.len(), 2);
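                // Keep the first broadcast transaction, the commitment, as Alice's on-chain state.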
8427                 txn.remove(0)
8428         };
8429
8430         // Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first.
8431         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8432         let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
8433         let persister = test_utils::TestPersister::new();
8434         let bob_broadcaster = test_utils::TestBroadcaster::with_blocks(Arc::clone(&alice_broadcaster.blocks));
8435         let watchtower_bob = {
8436                 let new_monitor = {
8437                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8438                         let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
8439                                         &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8440                         assert!(new_monitor == *monitor);
8441                         new_monitor
8442                 };
8443                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &bob_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8444                 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
8445                 watchtower
8446         };
8447         watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST - 1);
8448
8449         // Route another payment to generate another update with still previous HTLC pending
8450         let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 3000000);
8451         nodes[1].node.send_payment_with_route(&route, payment_hash,
8452                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
8453         check_added_monitors!(nodes[1], 1);
8454
8455         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8456         assert_eq!(updates.update_add_htlcs.len(), 1);
8457         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]);
8458         {
8459                 let mut node_0_per_peer_lock;
8460                 let mut node_0_peer_state_lock;
8461                 let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
8462                 if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
8463                         // Watchtower Alice should already have seen the block and reject the update
8464                         assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
8465                         assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8466                         assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8467                 } else { assert!(false); }
8468         }
8469         // Our local monitor is in-sync and hasn't yet processed the timeout
8470         check_added_monitors!(nodes[0], 1);
8471
8472         // Provide one more block to watchtower Bob, expecting broadcast of the commitment and HTLC-Timeout
8473         watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST);
8474
8475         // Watchtower Bob should have broadcast a commitment/HTLC-timeout
8476         let bob_state_y;
8477         {
8478                 let mut txn = bob_broadcaster.txn_broadcast();
8479                 assert_eq!(txn.len(), 2);
8480                 bob_state_y = txn.remove(0);
8481         };
8482
8483         // We confirm Bob's state Y on Alice, she should broadcast a HTLC-timeout
8484         let height = HTLC_TIMEOUT_BROADCAST + 1;
8485         connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
8486         check_closed_broadcast(&nodes[0], 1, true);
8487         check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false);
8488         watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
8489         check_added_monitors(&nodes[0], 1);
8490         {
8491                 let htlc_txn = alice_broadcaster.txn_broadcast();
8492                 assert_eq!(htlc_txn.len(), 2);
8493                 check_spends!(htlc_txn[0], bob_state_y);
8494                 // Alice doesn't clean up the old HTLC claim since it hasn't seen a conflicting spend for
8495                 // it. However, she should, because it now has an invalid parent.
8496                 check_spends!(htlc_txn[1], alice_state);
8497         }
8498 }
8499
8500 #[test]
8501 fn test_pre_lockin_no_chan_closed_update() {
8502         // Test that if a peer closes a channel in response to a funding_created message we don't
8503         // generate a channel update (as the channel cannot appear on chain without a funding_signed
8504         // message).
8505         //
8506         // Doing so would imply a channel monitor update before the initial channel monitor
8507         // registration, violating our API guarantees.
8508         //
8509         // Previously, full_stack_target managed to hit this case by opening then closing a channel,
8510         // then opening a second channel with the same funding output as the first (which is not
8511         // rejected because the first channel does not exist in the ChannelManager) and closing it
8512         // before receiving funding_signed.
8513         let chanmon_cfgs = create_chanmon_cfgs(2);
8514         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8515         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8516         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8517
8518         // Create an initial channel
8519         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
8520         let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8521         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
8522         let accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
8523         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
8524
8525         // Move the first channel through the funding flow...
8526         let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
8527
8528         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
8529         check_added_monitors!(nodes[0], 0);
8530
8531         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
8532         let channel_id = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
8533         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
8534         assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
8535         check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true);
8536 }
8537
8538 #[test]
8539 fn test_htlc_no_detection() {
8540         // This test is a mutation to underscore the detection logic bug we had
8541         // before #653. The HTLC value routed is above the remaining balance, thus
8542         // inverting the HTLC and `to_remote` outputs. The HTLC will come second, and
8543         // it wouldn't have been seen by pre-#653 detection, as we were enumerate()'ing
8544         // over a watched outputs vector (Vec<TxOut>), thus implicitly relying on
8545         // output ordering for correct filtering of spending children.
8546
8547         let chanmon_cfgs = create_chanmon_cfgs(2);
8548         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8549         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8550         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8551
8552         // Create some initial channels
8553         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8554
8555         send_payment(&nodes[0], &vec!(&nodes[1])[..], 1_000_000);
8556         let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
8557         let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
8558         assert_eq!(local_txn[0].input.len(), 1);
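        // Three outputs expected: to_local, to_remote, and the pending 2_000_000 msat HTLC.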
8559         assert_eq!(local_txn[0].output.len(), 3);
8560         check_spends!(local_txn[0], chan_1.3);
8561
8562         // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
8563         let block = create_dummy_block(nodes[0].best_block_hash(), 42, vec![local_txn[0].clone()]);
8564         connect_block(&nodes[0], &block);
8565         // We deliberately connect the local tx twice, as this would have provoked a failure in
8566         // this test before the #653 fix.
8567         chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &block, nodes[0].best_block_info().1 + 1);
8568         check_closed_broadcast!(nodes[0], true);
8569         check_added_monitors!(nodes[0], 1);
8570         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
8571         connect_blocks(&nodes[0], TEST_FINAL_CLTV);
8572
8573         let htlc_timeout = {
8574                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8575                 assert_eq!(node_txn.len(), 1);
8576                 assert_eq!(node_txn[0].input.len(), 1);
8577                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
8578                 check_spends!(node_txn[0], local_txn[0]);
8579                 node_txn[0].clone()
8580         };
8581
8582         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![htlc_timeout.clone()]));
8583         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
8584         expect_payment_failed!(nodes[0], our_payment_hash, false);
8585 }
8586
8587 fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain_before_fulfill: bool) {
8588         // If we route an HTLC, then learn the HTLC's preimage after the upstream channel has been
8589         // force-closed, we must claim that HTLC on-chain. (Given an HTLC forwarded from Alice --> Bob -->
8590         // Carol, Alice would be the upstream node, and Carol the downstream.)
8591         //
8592         // Steps of the test:
8593         // 1) Alice sends a HTLC to Carol through Bob.
8594         // 2) Carol doesn't settle the HTLC.
8595         // 3) If broadcast_alice is true, Alice force-closes her channel with Bob. Else Bob force closes.
8596         // Steps 4 and 5 may be reordered depending on go_onchain_before_fulfill.
8597         // 4) Bob sees Alice's commitment on his chain (or vice versa). An offered output is present
8598         //    but can't be claimed as Bob doesn't yet have knowledge of the preimage.
8599         // 5) Carol releases the preimage to Bob off-chain.
8600         // 6) Bob claims the offered output on the broadcasted commitment.
8601         let chanmon_cfgs = create_chanmon_cfgs(3);
8602         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
8603         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
8604         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
8605
8606         // Create some initial channels
8607         let chan_ab = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8608         create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 10001);
8609
8610         // Steps (1) and (2):
8611         // Send an HTLC Alice --> Bob --> Carol, but Carol doesn't settle the HTLC back.
8612         let (payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
8613
8614         // Check that Alice's commitment transaction now contains an output for this HTLC.
8615         let alice_txn = get_local_commitment_txn!(nodes[0], chan_ab.2);
8616         check_spends!(alice_txn[0], chan_ab.3);
8617         assert_eq!(alice_txn[0].output.len(), 2);
8618         check_spends!(alice_txn[1], alice_txn[0]); // 2nd transaction is a non-final HTLC-timeout
8619         assert_eq!(alice_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
8620         assert_eq!(alice_txn.len(), 2);
8621
8622         // Steps (3) and (4):
8623         // If `go_onchain_before_fulfill`, broadcast the relevant commitment transaction and check that Bob
8624         // responds by (1) broadcasting a channel update and (2) adding a new ChannelMonitor.
8625         let mut force_closing_node = 0; // Alice force-closes
8626         let mut counterparty_node = 1; // Bob if Alice force-closes
8627
8628         // Bob force-closes
8629         if !broadcast_alice {
8630                 force_closing_node = 1;
8631                 counterparty_node = 0;
8632         }
8633         nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
8634         check_closed_broadcast!(nodes[force_closing_node], true);
8635         check_added_monitors!(nodes[force_closing_node], 1);
8636         check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed);
8637         if go_onchain_before_fulfill {
8638                 let txn_to_broadcast = match broadcast_alice {
8639                         true => alice_txn.clone(),
8640                         false => get_local_commitment_txn!(nodes[1], chan_ab.2)
8641                 };
8642                 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
8643                 if broadcast_alice {
8644                         check_closed_broadcast!(nodes[1], true);
8645                         check_added_monitors!(nodes[1], 1);
8646                         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
8647                 }
8648         }
8649
8650         // Step (5):
8651         // Carol then claims the funds and sends an update_fulfill message to Bob, and they go through the
8652         // process of removing the HTLC from their commitment transactions.
8653         nodes[2].node.claim_funds(payment_preimage);
8654         check_added_monitors!(nodes[2], 1);
8655         expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
8656
8657         let carol_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
8658         assert!(carol_updates.update_add_htlcs.is_empty());
8659         assert!(carol_updates.update_fail_htlcs.is_empty());
8660         assert!(carol_updates.update_fail_malformed_htlcs.is_empty());
8661         assert!(carol_updates.update_fee.is_none());
8662         assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
8663
8664         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
8665         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if go_onchain_before_fulfill || force_closing_node == 1 { None } else { Some(1000) }, false, false);
8666         // If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
8667         if !go_onchain_before_fulfill && broadcast_alice {
8668                 let events = nodes[1].node.get_and_clear_pending_msg_events();
8669                 assert_eq!(events.len(), 1);
8670                 match events[0] {
8671                         MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
8672                                 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8673                         },
8674                         _ => panic!("Unexpected event"),
8675                 };
8676         }
8677         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &carol_updates.commitment_signed);
8678         // One monitor update for the preimage to update the Bob<->Alice channel, and one monitor update
8679         // for Carol<->Bob's updated commitment transaction info.
8680         check_added_monitors!(nodes[1], 2);
8681
8682         let events = nodes[1].node.get_and_clear_pending_msg_events();
8683         assert_eq!(events.len(), 2);
8684         let bob_revocation = match events[0] {
8685                 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
8686                         assert_eq!(*node_id, nodes[2].node.get_our_node_id());
8687                         (*msg).clone()
8688                 },
8689                 _ => panic!("Unexpected event"),
8690         };
8691         let bob_updates = match events[1] {
8692                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
8693                         assert_eq!(*node_id, nodes[2].node.get_our_node_id());
8694                         (*updates).clone()
8695                 },
8696                 _ => panic!("Unexpected event"),
8697         };
8698
8699         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revocation);
8700         check_added_monitors!(nodes[2], 1);
8701         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_updates.commitment_signed);
8702         check_added_monitors!(nodes[2], 1);
8703
8704         let events = nodes[2].node.get_and_clear_pending_msg_events();
8705         assert_eq!(events.len(), 1);
8706         let carol_revocation = match events[0] {
8707                 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
8708                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
8709                         (*msg).clone()
8710                 },
8711                 _ => panic!("Unexpected event"),
8712         };
8713         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &carol_revocation);
8714         check_added_monitors!(nodes[1], 1);
8715
8716         // If this test requires the force-closed channel to not be on-chain until after the fulfill,
8717         // here's where we put said channel's commitment tx on-chain.
8718         let mut txn_to_broadcast = alice_txn.clone();
8719         if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); }
8720         if !go_onchain_before_fulfill {
8721                 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
8722                 // If Bob was the one to force-close, he will have already passed these checks earlier.
8723                 if broadcast_alice {
8724                         check_closed_broadcast!(nodes[1], true);
8725                         check_added_monitors!(nodes[1], 1);
8726                         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
8727                 }
8728                 let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
8729                 if broadcast_alice {
8730                         assert_eq!(bob_txn.len(), 1);
8731                         check_spends!(bob_txn[0], txn_to_broadcast[0]);
8732                 } else {
8733                         assert_eq!(bob_txn.len(), 2);
8734                         check_spends!(bob_txn[0], chan_ab.3);
8735                 }
8736         }
8737
8738         // Step (6):
8739         // Finally, check that Bob broadcasted a preimage-claiming transaction for the HTLC output on the
8740         // broadcasted commitment transaction.
8741         {
8742                 let script_weight = match broadcast_alice {
8743                         true => OFFERED_HTLC_SCRIPT_WEIGHT,
8744                         false => ACCEPTED_HTLC_SCRIPT_WEIGHT
8745                 };
8746                 // If Alice force-closed, Bob only broadcasts a HTLC-output-claiming transaction. Otherwise,
8747                 // Bob force-closed and broadcasts the commitment transaction along with a
8748                 // HTLC-output-claiming transaction.
8749                 let bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
8750                 if broadcast_alice {
8751                         assert_eq!(bob_txn.len(), 1);
8752                         check_spends!(bob_txn[0], txn_to_broadcast[0]);
8753                         assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
8754                 } else {
8755                         assert_eq!(bob_txn.len(), 2);
8756                         check_spends!(bob_txn[1], txn_to_broadcast[0]);
8757                         assert_eq!(bob_txn[1].input[0].witness.last().unwrap().len(), script_weight);
8758                 }
8759         }
8760 }
8761
8762 #[test]
8763 fn test_onchain_htlc_settlement_after_close() {
8764         do_test_onchain_htlc_settlement_after_close(true, true);
8765         do_test_onchain_htlc_settlement_after_close(false, true); // Technically redundant, but may as well
8766         do_test_onchain_htlc_settlement_after_close(true, false);
8767         do_test_onchain_htlc_settlement_after_close(false, false);
8768 }
8769
8770 #[test]
8771 fn test_duplicate_temporary_channel_id_from_different_peers() {
8772         // Tests that we can accept two different `OpenChannel` requests with the same
8773         // `temporary_channel_id`, as long as they are from different peers.
8774         let chanmon_cfgs = create_chanmon_cfgs(3);
8775         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
8776         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
8777         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
8778
8779         // Create a first channel
8780         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
8781         let mut open_chan_msg_chan_1_0 = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
8782
8783         // Create a second channel
8784         nodes[2].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
8785         let mut open_chan_msg_chan_2_0 = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
8786
8787         // Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same
8788         // `temporary_channel_id` as the `OpenChannel` from nodes[1] to nodes[0].
8789         open_chan_msg_chan_2_0.temporary_channel_id = open_chan_msg_chan_1_0.temporary_channel_id;
8790
8791         // Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same
8792         // `temporary_channel_id` as they are from different peers.
8793         nodes[0].node.handle_open_channel(&nodes[1].node.get_our_node_id(), &open_chan_msg_chan_1_0);
8794         {
8795                 let events = nodes[0].node.get_and_clear_pending_msg_events();
8796                 assert_eq!(events.len(), 1);
8797                 match &events[0] {
8798                         MessageSendEvent::SendAcceptChannel { node_id, msg } => {
8799                                 assert_eq!(node_id, &nodes[1].node.get_our_node_id());
8800                                 assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id);
8801                         },
8802                         _ => panic!("Unexpected event"),
8803                 }
8804         }
8805
8806         nodes[0].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg_chan_2_0);
8807         {
8808                 let events = nodes[0].node.get_and_clear_pending_msg_events();
8809                 assert_eq!(events.len(), 1);
8810                 match &events[0] {
8811                         MessageSendEvent::SendAcceptChannel { node_id, msg } => {
8812                                 assert_eq!(node_id, &nodes[2].node.get_our_node_id());
8813                                 assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id);
8814                         },
8815                         _ => panic!("Unexpected event"),
8816                 }
8817         }
8818 }
8819
8820 #[test]
8821 fn test_duplicate_chan_id() {
8822         // Test that if a given peer tries to open a channel with the same channel_id as one that is
8823         // already open we reject it and keep the old channel.
8824         //
8825         // Previously, full_stack_target managed to figure out that if you tried to open two channels
8826         // with the same funding output (ie post-funding channel_id), we'd create a monitor update for
8827         // the existing channel when we detect the duplicate new channel, screwing up our monitor
8828         // updating logic for the existing channel.
8829         let chanmon_cfgs = create_chanmon_cfgs(2);
8830         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8831         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8832         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8833
8834         // Create an initial channel
8835         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
8836         let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8837         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
8838         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
8839
8840         // Try to create a second channel with the same temporary_channel_id as the first and check
8841         // that it is rejected.
8842         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
8843         {
8844                 let events = nodes[1].node.get_and_clear_pending_msg_events();
8845                 assert_eq!(events.len(), 1);
8846                 match events[0] {
8847                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
8848                                 // Technically, at this point, nodes[1] would be justified in thinking both the
8849                                 // first (valid) and second (invalid) channels are closed, given they both have
8850                                 // the same non-temporary channel_id. However, currently we do not, so we just
8851                                 // move forward with it.
8852                                 assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
8853                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
8854                         },
8855                         _ => panic!("Unexpected event"),
8856                 }
8857         }
8858
8859         // Move the first channel through the funding flow...
8860         let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
8861
8862         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
8863         check_added_monitors!(nodes[0], 0);
8864
8865         let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
8866         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
8867         {
8868                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
8869                 assert_eq!(added_monitors.len(), 1);
8870                 assert_eq!(added_monitors[0].0, funding_output);
8871                 added_monitors.clear();
8872         }
8873         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
8874
8875         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
8876
8877         let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
8878         let channel_id = funding_outpoint.to_channel_id();
8879
8880         // Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a
8881         // temporary one).
8882
8883         // First try to open a second channel with a temporary channel id equal to the txid-based one.
8884         // Technically this is allowed by the spec, but we don't support it and there's little reason
8885         // to. Still, it shouldn't cause any other issues.
8886         open_chan_msg.temporary_channel_id = channel_id;
8887         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
8888         {
8889                 let events = nodes[1].node.get_and_clear_pending_msg_events();
8890                 assert_eq!(events.len(), 1);
8891                 match events[0] {
8892                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
8893                                 // Technically, at this point, nodes[1] would be justified in thinking both
8894                                 // channels are closed, but currently we do not, so we just move forward with it.
8895                                 assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
8896                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
8897                         },
8898                         _ => panic!("Unexpected event"),
8899                 }
8900         }
8901
8902         // Now try to create a second channel which has a duplicate funding output.
8903         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
8904         let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8905         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_2_msg);
8906         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
8907         create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event
8908
8909         let (_, funding_created) = {
8910                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
8911                 let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
8912                 // Once we call `get_funding_created` the channel has the same channel_id as another
8913                 // channel in the ChannelManager - an invalid state, which would cause a panic later when we
8914                 // try to create another channel. Instead, we drop the channel entirely here (leaving the
8915                 // ChannelManager in a possibly-inconsistent state).
8916                 let mut as_chan = a_peer_state.outbound_v1_channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
8917                 let logger = test_utils::TestLogger::new();
8918                 as_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
8919         };
8920         check_added_monitors!(nodes[0], 0);
8921         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
8922         // At this point we'll check whether the channel_id is already present and immediately fail the
8923         // channel without trying to persist the `ChannelMonitor`.
8924         check_added_monitors!(nodes[1], 0);
8925
8926         // ...still, nodes[1] will reject the duplicate channel.
8927         {
8928                 let events = nodes[1].node.get_and_clear_pending_msg_events();
8929                 assert_eq!(events.len(), 1);
8930                 match events[0] {
8931                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
8932                                 // Technically, at this point, nodes[1] would be justified in thinking both
8933                                 // channels are closed, but currently it does not, so we just move forward with it.
8934                                 assert_eq!(msg.channel_id, channel_id);
8935                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
8936                         },
8937                         _ => panic!("Unexpected event"),
8938                 }
8939         }
8940
8941         // Finally, finish creating the original channel and send a payment over it to make sure
8942         // everything is functional.
8943         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
8944         {
8945                 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
8946                 assert_eq!(added_monitors.len(), 1);
8947                 assert_eq!(added_monitors[0].0, funding_output);
8948                 added_monitors.clear();
8949         }
8950         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
8951
8952         let events_4 = nodes[0].node.get_and_clear_pending_events();
8953         assert_eq!(events_4.len(), 0);
8954         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
8955         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
8956
8957         let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
8958         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
8959         update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
8960
8961         send_payment(&nodes[0], &[&nodes[1]], 8000000);
8962 }
8963
8964 #[test]
8965 fn test_error_chans_closed() {
8966         // Test that we properly handle error messages, closing appropriate channels.
8967         //
8968         // Prior to #787 we'd allow a peer to make us force-close a channel we had with a different
8969         // peer. The "real" fix for that is to index channels by peer_id; in the meantime we can test
8970         // various edge cases around it to ensure we don't regress.
8971         let chanmon_cfgs = create_chanmon_cfgs(3);
8972         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
8973         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
8974         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
8975
8976         // Create some initial channels
8977         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8978         let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8979         let chan_3 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001);
8980
8981         assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
8982         assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
8983         assert_eq!(nodes[2].node.list_usable_channels().len(), 1);
8984
8985         // An error message naming a channel we have with a different peer has no effect
8986         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() });
8987         assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
8988
8989         // Closing one channel doesn't impact others
8990         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
8991         check_added_monitors!(nodes[0], 1);
8992         check_closed_broadcast!(nodes[0], false);
8993         check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) });
8994         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
8995         assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
8996         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
8997         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2);
8998
8999         // An all-zero channel ID should close all channels we have with the sending peer
9000         let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9001         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
9002         check_added_monitors!(nodes[0], 2);
9003         check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) });
9004         let events = nodes[0].node.get_and_clear_pending_msg_events();
9005         assert_eq!(events.len(), 2);
9006         match events[0] {
9007                 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
9008                         assert_eq!(msg.contents.flags & 2, 2);
9009                 },
9010                 _ => panic!("Unexpected event"),
9011         }
9012         match events[1] {
9013                 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
9014                         assert_eq!(msg.contents.flags & 2, 2);
9015                 },
9016                 _ => panic!("Unexpected event"),
9017         }
9018         // Note that at this point users of a standard PeerHandler will end up calling
9019         // peer_disconnected.
9020         assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
9021         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
9022
9023         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
9024         assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
9025         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
9026 }
9027
9028 #[test]
9029 fn test_invalid_funding_tx() {
9030         // Test that we properly handle invalid funding transactions sent to us from a peer.
9031         //
9032         // Previously, all other major lightning implementations had failed to properly sanitize
9033         // funding transactions from their counterparties, leading to a multi-implementation critical
9034         // security vulnerability (though we always sanitized properly, we've previously had
9035         // unreleased crashes in the sanitization process).
9036         //
9037         // Further, if the funding transaction is consensus-valid, confirms, and is later spent, we'd
9038         // previously have crashed in `ChannelMonitor` even though we closed the channel as bogus and
9039         // gave up on it. We test this here by generating such a transaction.
9040         let chanmon_cfgs = create_chanmon_cfgs(2);
9041         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9042         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9043         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9044
9045         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 10_000, 42, None).unwrap();
9046         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
9047         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9048
9049         let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
9050
9051         // Create a witness program which can be spent by a witness of four empty stack elements and
9052         // which is 136 bytes long. This matches our "accepted HTLC preimage spend" matching, previously
9053         // causing a panic as we'd try to extract a 32-byte preimage from a witness element without
9054         // checking its length.
9055         let wit_program: Vec<u8> = channelmonitor::deliberately_bogus_accepted_htlc_witness_program();
9056         let wit_program_script: Script = wit_program.into();
9057         for output in tx.output.iter_mut() {
9058                 // Make the confirmed funding transaction have a bogus script_pubkey
9059                 output.script_pubkey = Script::new_v0_p2wsh(&wit_program_script.wscript_hash());
9060         }
9061
9062         nodes[0].node.funding_transaction_generated_unchecked(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone(), 0).unwrap();
9063         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
9064         check_added_monitors!(nodes[1], 1);
9065         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9066
9067         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
9068         check_added_monitors!(nodes[0], 1);
9069         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9070
9071         let events_1 = nodes[0].node.get_and_clear_pending_events();
9072         assert_eq!(events_1.len(), 0);
9073
9074         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
9075         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
9076         nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
9077
9078         let expected_err = "funding tx had wrong script/value or output index";
9079         confirm_transaction_at(&nodes[1], &tx, 1);
9080         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() });
9081         check_added_monitors!(nodes[1], 1);
9082         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
9083         assert_eq!(events_2.len(), 1);
9084         if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] {
9085                 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
9086                 if let msgs::ErrorAction::SendErrorMessage { msg } = action {
9087                         assert_eq!(msg.data, "Channel closed because of an exception: ".to_owned() + expected_err);
9088                 } else { panic!(); }
9089         } else { panic!(); }
9090         assert_eq!(nodes[1].node.list_channels().len(), 0);
9091
9092         // Now confirm a spend of the (bogus) funding transaction. As long as the witness is 5 elements
9093         // long, the ChannelMonitor will try to read 32 bytes from the second-to-last element, panicking
9094         // as it's not 32 bytes long.
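        // As a hedged illustration (stack layout assumed from BOLT 3, not read out of the helper
        // itself): a counterparty HTLC-success witness is normally
        //     0 <remotehtlcsig> <localhtlcsig> <payment_preimage> <witness_script>
        // i.e. five elements with the 32-byte preimage second-to-last. A shape-alike stack with a
        // wrong-length "preimage", roughly what the bogus witness below provides, could be built as:
        //     let shaped_like_htlc_success = Witness::from_vec(vec![
        //             vec![],          // leading empty element (CHECKMULTISIG dummy)
        //             vec![0x42; 71],  // placeholder remote HTLC signature
        //             vec![0x42; 71],  // placeholder local HTLC signature
        //             vec![0x42; 42],  // the "preimage" slot, deliberately not 32 bytes
        //             vec![0x42; 136], // placeholder witness script
        //     ]);
        //     assert_eq!(shaped_like_htlc_success.len(), 5);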
9095         let mut spend_tx = Transaction {
9096                 version: 2i32, lock_time: PackedLockTime::ZERO,
9097                 input: tx.output.iter().enumerate().map(|(idx, _)| TxIn {
9098                         previous_output: BitcoinOutPoint {
9099                                 txid: tx.txid(),
9100                                 vout: idx as u32,
9101                         },
9102                         script_sig: Script::new(),
9103                         sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
9104                         witness: Witness::from_vec(channelmonitor::deliberately_bogus_accepted_htlc_witness())
9105                 }).collect(),
9106                 output: vec![TxOut {
9107                         value: 1000,
9108                         script_pubkey: Script::new(),
9109                 }]
9110         };
9111         check_spends!(spend_tx, tx);
9112         mine_transaction(&nodes[1], &spend_tx);
9113 }
9114
9115 fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock: bool) {
9116         // In the first version of the chain::Confirm interface, after a refactor was made to not
9117         // broadcast CSV-locked transactions until their CSV lock is up, we wouldn't reliably broadcast
9118         // transactions after a `transactions_confirmed` call. Specifically, if the chain, as provided
9119         // via `best_block_updated`, is at height N, and a transaction output which we wish to spend at
9120         // height N-1 (due to a CSV to height N-1) is provided at height N, we would not broadcast the
9121         // spending transaction until height N+1 (or greater). This was due to the way
9122         // `ChannelMonitor::transactions_confirmed` worked, only checking whether we should broadcast a
9123         // spending transaction at the height the input transaction was confirmed at, not whether we
9124         // should broadcast a spending transaction at the current height.
9125         // A second, similar issue involved failing HTLCs backwards - because we only provided the
9126         // height at which transactions were confirmed to `OnchainTx::update_claims_view`, it wasn't
9127         // aware that the anti-reorg delay had, in fact, already expired, and waited to fail them
9128         // backwards until we learned about an additional block.
9129         //
9130         // As an additional check, if `test_height_before_timelock` is set, we instead test that we
9131         // aren't broadcasting transactions too early (i.e. not broadcasting them at all).
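        //
        // A rough sketch of the `chain::Confirm` call pattern this exercises (variable names are
        // illustrative; the real calls appear further down in this test):
        //     // Tell the monitor about a confirmation at the *current* height n...
        //     monitor.transactions_confirmed(&header_at_n, &[(0, &csv_locked_spend)], n);
        //     // ...where the CSV lock already matured at n - 1. Post-fix, the spending transaction
        //     // (and any resulting backwards HTLC failure) is generated immediately, rather than
        //     // only after a later best_block_updated(&header_at_n_plus_1, n + 1) call.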
9132         let chanmon_cfgs = create_chanmon_cfgs(3);
9133         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9134         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9135         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9136         *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
9137
9138         create_announced_chan_between_nodes(&nodes, 0, 1);
9139         let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
9140         let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
9141         nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
9142         nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
9143
9144         nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
9145         check_closed_broadcast!(nodes[1], true);
9146         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
9147         check_added_monitors!(nodes[1], 1);
9148         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
9149         assert_eq!(node_txn.len(), 1);
9150
9151         let conf_height = nodes[1].best_block_info().1;
9152         if !test_height_before_timelock {
9153                 connect_blocks(&nodes[1], 24 * 6);
9154         }
9155         nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
9156                 &nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height);
9157         if test_height_before_timelock {
9158                 // If we confirmed the close transaction, but timelocks have not yet expired, we should not
9159                 // generate any events or broadcast any transactions
9160                 assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
9161                 assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
9162         } else {
9163                 // We should broadcast an HTLC transaction spending our funding transaction first
9164                 let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
9165                 assert_eq!(spending_txn.len(), 2);
9166                 assert_eq!(spending_txn[0].txid(), node_txn[0].txid());
9167                 check_spends!(spending_txn[1], node_txn[0]);
9168                 // We should also generate a SpendableOutputs event with the to_self output (as its
9169                 // timelock is up).
9170                 let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
9171                 assert_eq!(descriptor_spend_txn.len(), 1);
9172
9173                 // If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we
9174                 // should immediately fail-backwards the HTLC to the previous hop, without waiting for an
9175                 // additional block built on top of the current chain.
9176                 nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
9177                         &nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
9178                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
9179                 check_added_monitors!(nodes[1], 1);
9180
9181                 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9182                 assert!(updates.update_add_htlcs.is_empty());
9183                 assert!(updates.update_fulfill_htlcs.is_empty());
9184                 assert_eq!(updates.update_fail_htlcs.len(), 1);
9185                 assert!(updates.update_fail_malformed_htlcs.is_empty());
9186                 assert!(updates.update_fee.is_none());
9187                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
9188                 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
9189                 expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_announce.contents.short_channel_id, true);
9190         }
9191 }
9192
9193 #[test]
9194 fn test_tx_confirmed_skipping_blocks_immediate_broadcast() {
9195         do_test_tx_confirmed_skipping_blocks_immediate_broadcast(false);
9196         do_test_tx_confirmed_skipping_blocks_immediate_broadcast(true);
9197 }
9198
9199 fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
9200         let chanmon_cfgs = create_chanmon_cfgs(2);
9201         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9202         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9203         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9204
9205         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9206
9207         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
9208                 .with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
9209         let route = get_route!(nodes[0], payment_params, 10_000).unwrap();
9210
9211         let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]);
9212
9213         {
9214                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
9215                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
9216                 check_added_monitors!(nodes[0], 1);
9217                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9218                 assert_eq!(events.len(), 1);
9219                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
9220                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9221                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
9222         }
9223         expect_pending_htlcs_forwardable!(nodes[1]);
9224         expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 10_000);
9225
9226         {
9227                 // Note that we use a different PaymentId here to allow us to pay duplicatively
9228                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
9229                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_secret.0)).unwrap();
9230                 check_added_monitors!(nodes[0], 1);
9231                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9232                 assert_eq!(events.len(), 1);
9233                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
9234                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9235                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
9236                 // At this point, nodes[1] will notice it has received too much value for the payment. It
9237                 // will assume the second HTLC is a privacy attack (no longer particularly relevant
9238                 // post-payment_secrets) and fail back the new HTLC. Previously, it'd also have failed back
9239                 // the first HTLC delivered above.
9240         }
9241
9242         expect_pending_htlcs_forwardable_ignore!(nodes[1]);
9243         nodes[1].node.process_pending_htlc_forwards();
9244
9245         if test_for_second_fail_panic {
9246                 // Now we fail back the first HTLC from the user's end.
9247                 nodes[1].node.fail_htlc_backwards(&our_payment_hash);
9248
9249                 let expected_destinations = vec![
9250                         HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
9251                         HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
9252                 ];
9253                 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations);
9254                 nodes[1].node.process_pending_htlc_forwards();
9255
9256                 check_added_monitors!(nodes[1], 1);
9257                 let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9258                 assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2);
9259
9260                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9261                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]);
9262                 commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
9263
9264                 let failure_events = nodes[0].node.get_and_clear_pending_events();
9265                 assert_eq!(failure_events.len(), 4);
9266                 if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); }
9267                 if let Event::PaymentFailed { .. } = failure_events[1] {} else { panic!(); }
9268                 if let Event::PaymentPathFailed { .. } = failure_events[2] {} else { panic!(); }
9269                 if let Event::PaymentFailed { .. } = failure_events[3] {} else { panic!(); }
9270         } else {
9271                 // Let the second HTLC fail and claim the first
9272                 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
9273                 nodes[1].node.process_pending_htlc_forwards();
9274
9275                 check_added_monitors!(nodes[1], 1);
9276                 let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9277                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9278                 commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
9279
9280                 expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new());
9281
9282                 claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
9283         }
9284 }
9285
9286 #[test]
9287 fn test_dup_htlc_second_fail_panic() {
9288         // Previously, if we received two HTLCs back-to-back, where the second overran the expected
9289         // value for the payment, we'd fail back both HTLCs after generating a `PaymentClaimable` event.
9290         // Then, if the user failed the second payment, they'd hit a "tried to fail an already failed
9291         // HTLC" debug panic. This test checks for that behavior, ensuring only one HTLC is auto-failed.
9292         do_test_dup_htlc_second_rejected(true);
9293 }
9294
9295 #[test]
9296 fn test_dup_htlc_second_rejected() {
9297         // Test that if we receive a second HTLC for an MPP payment that overruns the payment amount,
9298         // we simply reject the second HTLC but are still able to claim the first HTLC.
9299         do_test_dup_htlc_second_rejected(false);
9300 }
9301
9302 #[test]
9303 fn test_inconsistent_mpp_params() {
9304         // Test that if we receive two HTLCs with inconsistent payment parameters, we fail back the
9305         // offending HTLC and allow the other to stay pending.
9306         let chanmon_cfgs = create_chanmon_cfgs(4);
9307         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9308         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9309         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9310
9311         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
9312         create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
9313         create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
9314         let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);
9315
9316         let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
9317                 .with_bolt11_features(nodes[3].node.invoice_features()).unwrap();
9318         let mut route = get_route!(nodes[0], payment_params, 15_000_000).unwrap();
9319         assert_eq!(route.paths.len(), 2);
9320         route.paths.sort_by(|path_a, _| {
9321                 // Sort the paths so that the path through nodes[1] comes first
9322                 if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
9323                         core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
9324         });
9325
9326         let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]);
9327
9328         let cur_height = nodes[0].best_block_info().1;
9329         let payment_id = PaymentId([42; 32]);
9330
9331         let session_privs = {
9332                 // We create a fake route here so that we start with three pending HTLCs, which we'll
9333                 // ultimately have, just not right away.
9334                 let mut dup_route = route.clone();
9335                 dup_route.paths.push(route.paths[1].clone());
9336                 nodes[0].node.test_add_new_pending_payment(our_payment_hash,
9337                         RecipientOnionFields::secret_only(our_payment_secret), payment_id, &dup_route).unwrap()
9338         };
9339         nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash,
9340                 RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id,
9341                 &None, session_privs[0]).unwrap();
9342         check_added_monitors!(nodes[0], 1);
9343
9344         {
9345                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9346                 assert_eq!(events.len(), 1);
9347                 pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), false, None);
9348         }
9349         assert!(nodes[3].node.get_and_clear_pending_events().is_empty());
9350
9351         nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash,
9352                 RecipientOnionFields::secret_only(our_payment_secret), 14_000_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
9353         check_added_monitors!(nodes[0], 1);
9354
9355         {
9356                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9357                 assert_eq!(events.len(), 1);
9358                 let payment_event = SendEvent::from_event(events.pop().unwrap());
9359
9360                 nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9361                 commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false);
9362
9363                 expect_pending_htlcs_forwardable!(nodes[2]);
9364                 check_added_monitors!(nodes[2], 1);
9365
9366                 let mut events = nodes[2].node.get_and_clear_pending_msg_events();
9367                 assert_eq!(events.len(), 1);
9368                 let payment_event = SendEvent::from_event(events.pop().unwrap());
9369
9370                 nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
9371                 check_added_monitors!(nodes[3], 0);
9372                 commitment_signed_dance!(nodes[3], nodes[2], payment_event.commitment_msg, true, true);
9373
9374                 // At this point, nodes[3] should notice the two HTLCs don't contain the same total payment
9375                 // amount. It will assume the second is a privacy attack (no longer particularly relevant
9376                 // post-payment_secrets) and fail back the new HTLC.
9377         }
9378         expect_pending_htlcs_forwardable_ignore!(nodes[3]);
9379         nodes[3].node.process_pending_htlc_forwards();
9380         expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
9381         nodes[3].node.process_pending_htlc_forwards();
9382
9383         check_added_monitors!(nodes[3], 1);
9384
9385         let fail_updates_1 = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
9386         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9387         commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
9388
9389         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]);
9390         check_added_monitors!(nodes[2], 1);
9391
9392         let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
9393         nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_updates_2.update_fail_htlcs[0]);
9394         commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false);
9395
9396         expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
9397
9398         nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash,
9399                 RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id,
9400                 &None, session_privs[2]).unwrap();
9401         check_added_monitors!(nodes[0], 1);
9402
9403         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9404         assert_eq!(events.len(), 1);
9405         pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);
9406
9407         do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage);
9408         expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true);
9409 }
9410
9411 #[test]
9412 fn test_double_partial_claim() {
9413         // Test what happens if a node receives a payment, generates a PaymentClaimable event, the HTLCs
9414         // time out, the sender resends only some of the MPP parts, then the user processes the
9415         // PaymentClaimable event, ensuring they don't inadvertently claim only part of the full payment
9416         // amount.
9417         let chanmon_cfgs = create_chanmon_cfgs(4);
9418         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9419         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9420         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9421
9422         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
9423         create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
9424         create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
9425         create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);
9426
9427         let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000);
9428         assert_eq!(route.paths.len(), 2);
9429         route.paths.sort_by(|path_a, _| {
9430                 // Sort the paths so that the path through nodes[1] comes first
9431                 if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
9432                         core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
9433         });
9434
9435         send_along_route_with_secret(&nodes[0], route.clone(), &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 15_000_000, payment_hash, payment_secret);
9436         // nodes[3] has now received a PaymentClaimable event...which it will take some (exorbitant)
9437         // amount of time to respond to.
9438
9439         // Connect some blocks to time out the payment
9440         connect_blocks(&nodes[3], TEST_FINAL_CLTV);
9441         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later
9442
9443         let failed_destinations = vec![
9444                 HTLCDestination::FailedPayment { payment_hash },
9445                 HTLCDestination::FailedPayment { payment_hash },
9446         ];
9447         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations);
9448
9449         pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash, PaymentFailureReason::RecipientRejected);
9450
9451         // nodes[0] now retries one of the two paths...
9452         nodes[0].node.send_payment_with_route(&route, payment_hash,
9453                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9454         check_added_monitors!(nodes[0], 2);
9455
9456         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9457         assert_eq!(events.len(), 2);
9458         let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
9459         pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, false, None);
9460
9461         // At this point nodes[3] has received one half of the payment, and the user goes to handle
9462         // that PaymentClaimable event they got hours ago and never handled...we should refuse to claim.
9463         nodes[3].node.claim_funds(payment_preimage);
9464         check_added_monitors!(nodes[3], 0);
9465         assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
9466 }
9467
9468 /// The possible events which may trigger a `max_dust_htlc_exposure` breach
9469 #[derive(Clone, Copy, PartialEq)]
9470 enum ExposureEvent {
9471         /// Breach occurs at HTLC forwarding (see `send_htlc`)
9472         AtHTLCForward,
9473         /// Breach occurs at HTLC reception (see `update_add_htlc`)
9474         AtHTLCReception,
9475         /// Breach occurs at outbound update_fee (see `send_update_fee`)
9476         AtUpdateFeeOutbound,
9477 }
9478
9479 fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool, multiplier_dust_limit: bool) {
9480         // Test that we properly reject dust HTLCs which would violate our `max_dust_htlc_exposure_msat`
9481         // policy.
9482         //
9483         // At HTLC forwarding (`send_payment()`), if the sum of our trimmed-to-dust inbound HTLC
9484         // balance, our trimmed-to-dust outbound HTLC balance, and this new payment, as included on the
9485         // next counterparty commitment, is above our `max_dust_htlc_exposure_msat`, we'll reject the
9486         // update. At HTLC reception (`update_add_htlc()`), if the sum of our trimmed-to-dust inbound
9487         // HTLC balance, our trimmed-to-dust outbound HTLC balance, and this newly received HTLC, as
9488         // included on the next counterparty commitment, is above our `max_dust_htlc_exposure_msat`,
9489         // we'll fail the update. Note that we return a `temporary_channel_failure` (0x1000 | 7), as the
9490         // channel might be available again for HTLC processing once the dust bandwidth has cleared up.
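        //
        // As a rough worked example (figures back-derived from the threshold comments further down,
        // assuming a dust buffer feerate of 2530 sat/kW, non-anchor HTLC-timeout/success second-stage
        // weights of 663/703, a 546 sat holder dust limit and a 354 sat counterparty dust limit):
        //     holder tx, outbound HTLC dust threshold:       2530 * 663 / 1000 + 546 = 2223 sats
        //     holder tx, inbound HTLC dust threshold:        2530 * 703 / 1000 + 546 = 2324 sats
        //     counterparty tx, outbound HTLC dust threshold: 2530 * 703 / 1000 + 354 = 2132 sats
        //     counterparty tx, inbound HTLC dust threshold:  2530 * 663 / 1000 + 354 = 2031 sats
        // HTLCs below these thresholds are trimmed to dust and count towards the exposure limit.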
9491
9492         let chanmon_cfgs = create_chanmon_cfgs(2);
9493         let mut config = test_default_channel_config();
9494         config.channel_config.max_dust_htlc_exposure = if multiplier_dust_limit {
9495                 // Default test fee estimator rate is 253 sat/kw, so we set the multiplier to 5_000_000 / 253
9496                 // to get roughly the same initial value as the default setting when this test was
9497                 // originally written.
9498                 MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253)
9499         } else { MaxDustHTLCExposure::FixedLimitMsat(5_000_000) }; // initial default setting value
9500         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9501         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
9502         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9503
9504         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
9505         let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9506         open_channel.max_htlc_value_in_flight_msat = 50_000_000;
9507         open_channel.max_accepted_htlcs = 60;
9508         if on_holder_tx {
9509                 open_channel.dust_limit_satoshis = 546;
9510         }
9511         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
9512         let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9513         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
9514
9515         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9516
9517         let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
9518
9519         if on_holder_tx {
9520                 let mut node_0_per_peer_lock;
9521                 let mut node_0_peer_state_lock;
9522                 let mut chan = get_outbound_v1_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id);
9523                 chan.context.holder_dust_limit_satoshis = 546;
9524         }
9525
9526         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9527         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
9528         check_added_monitors!(nodes[1], 1);
9529         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9530
9531         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
9532         check_added_monitors!(nodes[0], 1);
9533         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9534
9535         let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
9536         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
9537         update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
9538
9539         // Fetch a route in advance as we will be unable to do so once we're unable to send.
9540         let (mut route, payment_hash, _, payment_secret) =
9541                 get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
9542
9543         let (dust_buffer_feerate, max_dust_htlc_exposure_msat) = {
9544                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
9545                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
9546                 let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
9547                 (chan.context.get_dust_buffer_feerate(None) as u64,
9548                 chan.context.get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
9549         };
9550         let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
9551         let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
9552
9553         let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
9554         let dust_inbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
9555
9556         let dust_htlc_on_counterparty_tx: u64 = 4;
9557         let dust_htlc_on_counterparty_tx_msat: u64 = max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx;
9558
9559         if on_holder_tx {
9560                 if dust_outbound_balance {
9561                         // Outbound dust threshold: 2223 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
9562                         // Outbound dust balance: 4372 sats
9563                         // Note that we need the sent payment to be above the outbound dust threshold on the counterparty tx of 2132 sats
9564                         for _ in 0..dust_outbound_htlc_on_holder_tx {
9565                                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_outbound_htlc_on_holder_tx_msat);
9566                                 nodes[0].node.send_payment_with_route(&route, payment_hash,
9567                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9568                         }
9569                 } else {
9570                         // Inbound dust threshold: 2324 sats (`dust_buffer_feerate` * HTLC_SUCCESS_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
9571                         // Inbound dust balance: 4372 sats
9572                         // Note that we need the sent payment to be above the outbound dust threshold on the counterparty tx of 2031 sats
9573                         for _ in 0..dust_inbound_htlc_on_holder_tx {
9574                                 route_payment(&nodes[1], &[&nodes[0]], dust_inbound_htlc_on_holder_tx_msat);
9575                         }
9576                 }
9577         } else {
9578                 if dust_outbound_balance {
9579                         // Outbound dust threshold: 2132 sats (`dust_buffer_feerate` * HTLC_SUCCESS_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
9580                         // Outbound dust balance: 5000 sats
9581                         for _ in 0..dust_htlc_on_counterparty_tx - 1 {
9582                                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_htlc_on_counterparty_tx_msat);
9583                                 nodes[0].node.send_payment_with_route(&route, payment_hash,
9584                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9585                         }
9586                 } else {
9587                         // Inbound dust threshold: 2031 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
9588                         // Inbound dust balance: 5000 sats
9589                         for _ in 0..dust_htlc_on_counterparty_tx - 1 {
9590                                 route_payment(&nodes[1], &[&nodes[0]], dust_htlc_on_counterparty_tx_msat);
9591                         }
9592                 }
9593         }
9594
9595         if exposure_breach_event == ExposureEvent::AtHTLCForward {
9596                 route.paths[0].hops.last_mut().unwrap().fee_msat =
9597                         if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 };
9598                 // With default dust exposure: 5000 sats
9599                 if on_holder_tx {
9600                         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
9601                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
9602                                 ), true, APIError::ChannelUnavailable { .. }, {});
9603                 } else {
9604                         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
9605                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
9606                                 ), true, APIError::ChannelUnavailable { .. }, {});
9607                 }
9608         } else if exposure_breach_event == ExposureEvent::AtHTLCReception {
9609                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 4 });
9610                 nodes[1].node.send_payment_with_route(&route, payment_hash,
9611                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9612                 check_added_monitors!(nodes[1], 1);
9613                 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
9614                 assert_eq!(events.len(), 1);
9615                 let payment_event = SendEvent::from_event(events.remove(0));
9616                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
9617                 // With default dust exposure: 5000 sats
9618                 if on_holder_tx {
9619                         // Outbound dust balance: 6399 sats
9620                         let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
9621                         let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
9622                         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
9623                 } else {
9624                         // Outbound dust balance: 5200 sats
9625                         nodes[0].logger.assert_log("lightning::ln::channel".to_string(),
9626                                 format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
9627                                         dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx - 1) + dust_htlc_on_counterparty_tx_msat + 4,
9628                                         max_dust_htlc_exposure_msat), 1);
9629                 }
9630         } else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
9631                 route.paths[0].hops.last_mut().unwrap().fee_msat = 2_500_000;
9632                 // For the multiplier dust exposure limit, since it scales with feerate,
9633                 // we need to add a lot of HTLCs that will become dust at the new feerate
9634                 // to cross the threshold.
9635                 for _ in 0..20 {
9636                         let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(1_000), None);
9637                         nodes[0].node.send_payment_with_route(&route, payment_hash,
9638                                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9639                 }
9640                 {
9641                         let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9642                         *feerate_lock = *feerate_lock * 10;
9643                 }
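                // With the default 253 sat/kW test fee estimate noted above, this bumps the target
                // feerate to 253 * 10 = 2530 sat/kW, matching the "new feerate at 2530" log line
                // asserted below.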
9644                 nodes[0].node.timer_tick_occurred();
9645                 check_added_monitors!(nodes[0], 1);
9646                 nodes[0].logger.assert_log_contains("lightning::ln::channel", "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure", 1);
9647         }
9648
9649         let _ = nodes[0].node.get_and_clear_pending_msg_events();
9650         let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
9651         added_monitors.clear();
9652 }
9653
9654 fn do_test_max_dust_htlc_exposure_by_threshold_type(multiplier_dust_limit: bool) {
9655         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit);
9656         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit);
9657         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit);
9658         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit);
9659         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit);
9660         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit);
9661         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit);
9662         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit);
9663         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit);
9664         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit);
9665         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit);
9666         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit);
9667 }
9668
9669 #[test]
9670 fn test_max_dust_htlc_exposure() {
9671         do_test_max_dust_htlc_exposure_by_threshold_type(false);
9672         do_test_max_dust_htlc_exposure_by_threshold_type(true);
9673 }
9674
9675 #[test]
9676 fn test_non_final_funding_tx() {
9677         let chanmon_cfgs = create_chanmon_cfgs(2);
9678         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9679         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9680         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9681
9682         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
9683         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9684         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
9685         let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9686         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
9687
9688         let best_height = nodes[0].node.best_block.read().unwrap().height();
9689
9690         let chan_id = *nodes[0].network_chan_count.borrow();
9691         let events = nodes[0].node.get_and_clear_pending_events();
9692         let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::Script::new(), sequence: Sequence(1), witness: Witness::from_vec(vec!(vec!(1))) };
9693         assert_eq!(events.len(), 1);
9694         let mut tx = match events[0] {
9695                 Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
9696                         // Timelock the transaction _beyond_ the best client height + 1.
9697                         Transaction { version: chan_id as i32, lock_time: PackedLockTime(best_height + 2), input: vec![input], output: vec![TxOut {
9698                                 value: *channel_value_satoshis, script_pubkey: output_script.clone(),
9699                         }]}
9700                 },
9701                 _ => panic!("Unexpected event"),
9702         };
9703         // Transaction should fail as it's evaluated as non-final for propagation.
9704         match nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()) {
9705                 Err(APIError::APIMisuseError { err }) => {
9706                         assert_eq!(format!("Funding transaction absolute timelock is non-final"), err);
9707                 },
9708                 _ => panic!()
9709         }
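        // As a small worked check (heights assumed purely for illustration): with a best block height
        // of 100, a funding transaction with lock_time 102 is rejected as non-final above, while
        // decrementing it to 101 (one block of headroom) is accepted below.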
9710
9711         // However, the transaction should be accepted if its locktime is within +1 of the current best block height.
9712         tx.lock_time = PackedLockTime(tx.lock_time.0 - 1);
9713         assert!(nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
9714         get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9715 }
9716
9717 #[test]
9718 fn accept_busted_but_better_fee() {
9719         // If a peer sends us a fee update that is too low, but still higher than our previous channel
9720         // feerate, we should accept it. In the future we may want to consider closing the channel
9721         // later, but for now we simply accept the update.
9722         let mut chanmon_cfgs = create_chanmon_cfgs(2);
9723         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9724         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9725         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9726
9727         create_chan_between_nodes(&nodes[0], &nodes[1]);
9728
9729         // Set nodes[1] to expect 5,000 sat/kW.
9730         {
9731                 let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
9732                 *feerate_lock = 5000;
9733         }
9734
9735         // If nodes[0] increases their feerate, even if it's not enough, nodes[1] should accept it.
9736         {
9737                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9738                 *feerate_lock = 1000;
9739         }
9740         nodes[0].node.timer_tick_occurred();
9741         check_added_monitors!(nodes[0], 1);
9742
9743         let events = nodes[0].node.get_and_clear_pending_msg_events();
9744         assert_eq!(events.len(), 1);
9745         match events[0] {
9746                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
9747                         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
9748                         commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
9749                 },
9750                 _ => panic!("Unexpected event"),
9751         };
9752
9753         // If nodes[0] increases their feerate further, even if it's not enough, nodes[1] should accept
9754         // it.
9755         {
9756                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9757                 *feerate_lock = 2000;
9758         }
9759         nodes[0].node.timer_tick_occurred();
9760         check_added_monitors!(nodes[0], 1);
9761
9762         let events = nodes[0].node.get_and_clear_pending_msg_events();
9763         assert_eq!(events.len(), 1);
9764         match events[0] {
9765                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
9766                         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
9767                         commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
9768                 },
9769                 _ => panic!("Unexpected event"),
9770         };
9771
9772         // However, if nodes[0] decreases their feerate, nodes[1] should reject it and close the
9773         // channel.
9774         {
9775                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9776                 *feerate_lock = 1000;
9777         }
9778         nodes[0].node.timer_tick_occurred();
9779         check_added_monitors!(nodes[0], 1);
9780
9781         let events = nodes[0].node.get_and_clear_pending_msg_events();
9782         assert_eq!(events.len(), 1);
9783         match events[0] {
9784                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
9785                         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
9786                         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError {
9787                                 err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000 (- 250)".to_owned() });
9788                         check_closed_broadcast!(nodes[1], true);
9789                         check_added_monitors!(nodes[1], 1);
9790                 },
9791                 _ => panic!("Unexpected event"),
9792         };
9793 }
9794
9795 fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash: bool) {
9796         let mut chanmon_cfgs = create_chanmon_cfgs(2);
9797         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9798         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9799         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9800         let min_final_cltv_expiry_delta = 120;
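        // Pick a final CLTV expiry delta just above or just below the custom minimum, depending on whether we're exercising the success or failure path.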
9801         let final_cltv_expiry_delta = if valid_delta { min_final_cltv_expiry_delta + 2 } else {
9802                 min_final_cltv_expiry_delta - 2 };
9803         let recv_value = 100_000;
9804
9805         create_chan_between_nodes(&nodes[0], &nodes[1]);
9806
9807         let payment_parameters = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), final_cltv_expiry_delta as u32);
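        // Register the inbound payment with the custom `min_final_cltv_expiry_delta`, either using a user-provided payment hash or letting the node generate one.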
9808         let (payment_hash, payment_preimage, payment_secret) = if use_user_hash {
9809                 let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1],
9810                         Some(recv_value), Some(min_final_cltv_expiry_delta));
9811                 (payment_hash, payment_preimage, payment_secret)
9812         } else {
9813                 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(recv_value), 7200, Some(min_final_cltv_expiry_delta)).unwrap();
9814                 (payment_hash, nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap(), payment_secret)
9815         };
9816         let route = get_route!(nodes[0], payment_parameters, recv_value).unwrap();
9817         nodes[0].node.send_payment_with_route(&route, payment_hash,
9818                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9819         check_added_monitors!(nodes[0], 1);
9820         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9821         assert_eq!(events.len(), 1);
9822         let mut payment_event = SendEvent::from_event(events.pop().unwrap());
9823         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9824         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
9825         expect_pending_htlcs_forwardable!(nodes[1]);
9826
9827         if valid_delta {
9828                 expect_payment_claimable!(nodes[1], payment_hash, payment_secret, recv_value, if use_user_hash {
9829                         None } else { Some(payment_preimage) }, nodes[1].node.get_our_node_id());
9830
9831                 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
9832         } else {
9833                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
9834
9835                 check_added_monitors!(nodes[1], 1);
9836
9837                 let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9838                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates.update_fail_htlcs[0]);
9839                 commitment_signed_dance!(nodes[0], nodes[1], fail_updates.commitment_signed, false, true);
9840
9841                 expect_payment_failed!(nodes[0], payment_hash, true);
9842         }
9843 }
9844
9845 #[test]
9846 fn test_payment_with_custom_min_cltv_expiry_delta() {
9847         do_payment_with_custom_min_final_cltv_expiry(false, false);
9848         do_payment_with_custom_min_final_cltv_expiry(false, true);
9849         do_payment_with_custom_min_final_cltv_expiry(true, false);
9850         do_payment_with_custom_min_final_cltv_expiry(true, true);
9851 }
9852
9853 #[test]
9854 fn test_disconnects_peer_awaiting_response_ticks() {
9855         // Tests that nodes which are awaiting a response critical for channel responsiveness
9856         // disconnect their counterparty after `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
9857         let mut chanmon_cfgs = create_chanmon_cfgs(2);
9858         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9859         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9860         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9861
9862         // Asserts whether a disconnect event was queued for the user, per `should_disconnect`.
9863         let check_disconnect_event = |node: &Node, should_disconnect: bool| {
9864                 let disconnect_event = node.node.get_and_clear_pending_msg_events().iter().find_map(|event|
9865                         if let MessageSendEvent::HandleError { action, .. } = event {
9866                                 if let msgs::ErrorAction::DisconnectPeerWithWarning { .. } = action {
9867                                         Some(())
9868                                 } else {
9869                                         None
9870                                 }
9871                         } else {
9872                                 None
9873                         }
9874                 );
9875                 assert_eq!(disconnect_event.is_some(), should_disconnect);
9876         };
9877
9878         // Fires timer ticks ensuring we only attempt to disconnect peers after reaching
9879         // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
9880         let check_disconnect = |node: &Node| {
9881                 // No disconnect without any timer ticks.
9882                 check_disconnect_event(node, false);
9883
9884                 // No disconnect with 1 timer tick less than required.
9885                 for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS - 1 {
9886                         node.node.timer_tick_occurred();
9887                         check_disconnect_event(node, false);
9888                 }
9889
9890                 // Disconnect after reaching the required ticks.
9891                 node.node.timer_tick_occurred();
9892                 check_disconnect_event(node, true);
9893
9894                 // Disconnect again on the next tick if the peer hasn't been disconnected yet.
9895                 node.node.timer_tick_occurred();
9896                 check_disconnect_event(node, true);
9897         };
9898
9899         create_chan_between_nodes(&nodes[0], &nodes[1]);
9900
9901         // We'll start by performing a fee update with Alice (nodes[0]) on the channel.
9902         *nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2;
9903         nodes[0].node.timer_tick_occurred();
9904         check_added_monitors!(&nodes[0], 1);
9905         let alice_fee_update = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
9906         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), alice_fee_update.update_fee.as_ref().unwrap());
9907         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &alice_fee_update.commitment_signed);
9908         check_added_monitors!(&nodes[1], 1);
9909
9910         // This will prompt Bob (nodes[1]) to respond with his `CommitmentSigned` and `RevokeAndACK`.
9911         let (bob_revoke_and_ack, bob_commitment_signed) = get_revoke_commit_msgs!(&nodes[1], nodes[0].node.get_our_node_id());
9912         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revoke_and_ack);
9913         check_added_monitors!(&nodes[0], 1);
9914         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_commitment_signed);
9915         check_added_monitors(&nodes[0], 1);
9916
9917         // Alice then needs to send her final `RevokeAndACK` to complete the commitment dance. We
9918         // pretend Bob hasn't received the message and check whether he'll disconnect Alice after
9919         // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
9920         let alice_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
9921         check_disconnect(&nodes[1]);
9922
9923         // Now, we'll reconnect them to test awaiting a `ChannelReestablish` message.
9924         //
9925         // Note that since the commitment dance didn't complete above, Alice is expected to resend her
9926         // final `RevokeAndACK` to Bob to complete it.
9927         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
9928         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
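        // Exchange `Init` messages in both directions to re-establish the connection.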
9929         let bob_init = msgs::Init {
9930                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
9931         };
9932         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &bob_init, true).unwrap();
9933         let alice_init = msgs::Init {
9934                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
9935         };
9936         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &alice_init, true).unwrap();
9937
9938         // Upon reconnection, Alice sends her `ChannelReestablish` to Bob. Alice, however, hasn't
9939         // received Bob's yet, so she should disconnect him after reaching
9940         // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
9941         let alice_channel_reestablish = get_event_msg!(
9942                 nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()
9943         );
9944         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &alice_channel_reestablish);
9945         check_disconnect(&nodes[0]);
9946
9947         // Bob now sends his `ChannelReestablish` to Alice to resume the channel and consider it "live".
9948         let bob_channel_reestablish = nodes[1].node.get_and_clear_pending_msg_events().iter().find_map(|event|
9949                 if let MessageSendEvent::SendChannelReestablish { node_id, msg } = event {
9950                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
9951                         Some(msg.clone())
9952                 } else {
9953                         None
9954                 }
9955         ).unwrap();
9956         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bob_channel_reestablish);
9957
9958         // Sanity check that Alice won't disconnect Bob since she's no longer waiting for any messages.
9959         for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
9960                 nodes[0].node.timer_tick_occurred();
9961                 check_disconnect_event(&nodes[0], false);
9962         }
9963
9964         // However, Bob is still waiting on Alice's `RevokeAndACK`, so he should disconnect her after
9965         // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
9966         check_disconnect(&nodes[1]);
9967
9968         // Finally, have Bob process the last message.
9969         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &alice_revoke_and_ack);
9970         check_added_monitors(&nodes[1], 1);
9971
9972         // At this point, neither node should attempt to disconnect each other, since they aren't
9973         // waiting on any messages.
9974         for node in &nodes {
9975                 for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
9976                         node.node.timer_tick_occurred();
9977                         check_disconnect_event(node, false);
9978                 }
9979         }
9980 }
9981
9982 #[test]
9983 fn test_remove_expired_outbound_unfunded_channels() {
9984         let chanmon_cfgs = create_chanmon_cfgs(2);
9985         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9986         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9987         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9988
9989         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
9990         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9991         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
9992         let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9993         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
9994
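        // Drain the `FundingGenerationReady` event but never provide a funding transaction, leaving the outbound channel unfunded.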
9995         let events = nodes[0].node.get_and_clear_pending_events();
9996         assert_eq!(events.len(), 1);
9997         match events[0] {
9998                 Event::FundingGenerationReady { .. } => (),
9999                 _ => panic!("Unexpected event"),
10000         };
10001
10002         // Asserts whether the outbound channel exists in nodes[0]'s peer state map.
10003         let check_outbound_channel_existence = |should_exist: bool| {
10004                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
10005                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
10006                 assert_eq!(chan_lock.outbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
10007         };
10008
10009         // Channel should exist without any timer ticks.
10010         check_outbound_channel_existence(true);
10011
10012         // Channel should exist with 1 timer tick less than required.
10013         for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
10014                 nodes[0].node.timer_tick_occurred();
10015                 check_outbound_channel_existence(true);
10016         }
10017
10018         // Remove channel after reaching the required ticks.
10019         nodes[0].node.timer_tick_occurred();
10020         check_outbound_channel_existence(false);
10021
10022         check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
10023 }
10024
10025 #[test]
10026 fn test_remove_expired_inbound_unfunded_channels() {
10027         let chanmon_cfgs = create_chanmon_cfgs(2);
10028         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10029         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10030         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10031
10032         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
10033         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10034         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10035         let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10036         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10037
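        // As above, never provide a funding transaction, so the inbound channel on nodes[1] remains unfunded.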
10038         let events = nodes[0].node.get_and_clear_pending_events();
10039         assert_eq!(events.len(), 1);
10040         match events[0] {
10041                 Event::FundingGenerationReady { .. } => (),
10042                 _ => panic!("Unexpected event"),
10043         };
10044
10045         // Asserts whether the inbound channel exists in nodes[1]'s peer state map.
10046         let check_inbound_channel_existence = |should_exist: bool| {
10047                 let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
10048                 let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
10049                 assert_eq!(chan_lock.inbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
10050         };
10051
10052         // Channel should exist without any timer ticks.
10053         check_inbound_channel_existence(true);
10054
10055         // Channel should exist with 1 timer tick less than required.
10056         for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
10057                 nodes[1].node.timer_tick_occurred();
10058                 check_inbound_channel_existence(true);
10059         }
10060
10061         // Remove channel after reaching the required ticks.
10062         nodes[1].node.timer_tick_occurred();
10063         check_inbound_channel_existence(false);
10064
10065         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
10066 }