Add a `RecipientOnionFields` argument to spontaneous payment sends
[rust-lightning] / lightning / src / ln / functional_tests.rs
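The tests below exercise the parallel change for regular sends, passing `RecipientOnionFields::secret_only(payment_secret)` into `send_payment_with_route`. For the spontaneous (keysend) sends named in the title, the call shape is roughly the sketch below — a hedged illustration reusing this file's test-harness names (`nodes`, `route`), assuming the `send_spontaneous_payment` signature this change introduces and the `RecipientOnionFields::spontaneous_empty()` constructor:

```rust
// Sketch (not part of this file): a spontaneous send now also carries
// RecipientOnionFields. With no invoice there is no payment secret, so the
// empty variant is passed.
let payment_preimage = PaymentPreimage([42; 32]);
nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
	RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
```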
// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Tests that test standing up a network of ChannelManagers, creating channels, sending
//! payments/messages between them, and often checking the resulting ChannelMonitors are able to
//! claim outputs on-chain.

use crate::chain;
use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
use crate::chain::chaininterface::LowerBoundedFeeEstimator;
use crate::chain::channelmonitor;
use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use crate::chain::transaction::OutPoint;
use crate::chain::keysinterface::{ChannelSigner, EcdsaChannelSigner, EntropySource};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination};
use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash};
use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT};
use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA};
use crate::ln::channel::{Channel, ChannelError};
use crate::ln::{chan_utils, onion_utils};
use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
use crate::routing::gossip::{NetworkGraph, NetworkUpdate};
use crate::routing::router::{PaymentParameters, Route, RouteHop, RouteParameters, find_route, get_route};
use crate::ln::features::{ChannelFeatures, NodeFeatures};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use crate::util::enforcing_trait_impls::EnforcingSigner;
use crate::util::test_utils;
use crate::util::errors::APIError;
use crate::util::ser::{Writeable, ReadableArgs};
use crate::util::string::UntrustedString;
use crate::util::config::UserConfig;

use bitcoin::hash_types::BlockHash;
use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::blockdata::script::{Builder, Script};
use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::network::constants::Network;
use bitcoin::{PackedLockTime, Sequence, Transaction, TxIn, TxMerkleNode, TxOut, Witness};
use bitcoin::OutPoint as BitcoinOutPoint;

use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::{PublicKey, SecretKey};

use regex;

use crate::io;
use crate::prelude::*;
use alloc::collections::BTreeSet;
use core::default::Default;
use core::iter::repeat;
use bitcoin::hashes::Hash;
use crate::sync::{Arc, Mutex};

use crate::ln::functional_test_utils::*;
use crate::ln::chan_utils::CommitmentTransaction;

#[test]
fn test_insane_channel_opens() {
	// Stand up a network of 2 nodes
	use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS;
	let mut cfg = UserConfig::default();
	cfg.channel_handshake_limits.max_funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1;
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(cfg)]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Instantiate channel parameters where we push the maximum msats given our
	// funding satoshis
	let channel_value_sat = 31337; // same as funding satoshis
	let channel_reserve_satoshis = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg);
	let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;

	// Have node0 initiate a channel to node1 with aforementioned parameters
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_sat, push_msat, 42, None).unwrap();

	// Extract the channel open message from node0 to node1
	let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	// Test helper that asserts we get the correct error string given a mutator
	// that supposedly makes the channel open message insane
	let insane_open_helper = |expected_error_str: &str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &message_mutator(open_channel_message.clone()));
		let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(msg_events.len(), 1);
		let expected_regex = regex::Regex::new(expected_error_str).unwrap();
		if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
			match action {
				&ErrorAction::SendErrorMessage { .. } => {
					nodes[1].logger.assert_log_regex("lightning::ln::channelmanager", expected_regex, 1);
				},
				_ => panic!("unexpected event!"),
			}
		} else { assert!(false); }
	};

	use crate::ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;

	// Test all mutations that would make the channel open message insane
	insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
	insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });

	insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.funding_satoshis + 1; msg });

	insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });

	insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1; msg });

	insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });

	insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });

	insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.max_accepted_htlcs = 0; msg });

	insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.max_accepted_htlcs = 484; msg });
}

#[test]
fn test_funding_exceeds_no_wumbo_limit() {
	// Test that if a peer does not support wumbo channels, we'll refuse to open a wumbo channel to
	// them.
	use crate::ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO;
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	*node_cfgs[1].override_init_features.borrow_mut() = Some(channelmanager::provided_init_features(&test_default_channel_config()).clear_wumbo());
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	match nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None) {
		Err(APIError::APIMisuseError { err }) => {
			assert_eq!(format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1), err);
		},
		_ => panic!()
	}
}

fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
	// A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure,
	// but only for them. Because some LSPs do it with some level of trust of the clients (for a
	// substantial UX improvement), we explicitly allow it. Because it's unlikely to happen often
	// in normal testing, we test it explicitly here.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let default_config = UserConfig::default();

	// Have node0 initiate a channel to node1 with aforementioned parameters
	let mut push_amt = 100_000_000;
	let feerate_per_kw = 253;
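	// (Assumption: 253 sat/kW matches the test fee estimator's default, i.e. roughly the
	// 1 sat/vbyte relay floor.)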
	let opt_anchors = false;
	push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
	push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;

	let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None).unwrap();
	let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	if !send_from_initiator {
		open_channel_message.channel_reserve_satoshis = 0;
		open_channel_message.max_htlc_value_in_flight_msat = 100_000_000;
	}
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);

	// Extract the channel accept message from node1 to node0
	let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
	if send_from_initiator {
		accept_channel_message.channel_reserve_satoshis = 0;
		accept_channel_message.max_htlc_value_in_flight_msat = 100_000_000;
	}
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
	{
		let sender_node = if send_from_initiator { &nodes[1] } else { &nodes[0] };
		let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
		let mut sender_node_per_peer_lock;
		let mut sender_node_peer_state_lock;
		let mut chan = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
		chan.holder_selected_channel_reserve_satoshis = 0;
		chan.holder_max_htlc_value_in_flight_msat = 100_000_000;
	}

	let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id);
	let funding_msgs = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx);
	create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_msgs.0);

	// nodes[0] should now be able to send the full balance to nodes[1], violating nodes[1]'s
	// security model if it ever tries to send funds back to nodes[0] (but that's not our problem).
	if send_from_initiator {
		send_payment(&nodes[0], &[&nodes[1]], 100_000_000
			// Note that for outbound channels we have to consider the commitment tx fee and the
			// "fee spike buffer", which is currently a multiple of the total commitment tx fee as
			// well as an additional HTLC.
			- FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, opt_anchors));
	} else {
		send_payment(&nodes[1], &[&nodes[0]], push_amt);
	}
}

#[test]
fn test_counterparty_no_reserve() {
	do_test_counterparty_no_reserve(true);
	do_test_counterparty_no_reserve(false);
}

#[test]
fn test_async_inbound_update_fee() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &[&nodes[1]], 8000000);

	// A                                        B
	// update_fee                            ->
	// send (1) commitment_signed            -.
	//                                       <- update_add_htlc/commitment_signed
	// send (2) RAA (awaiting remote revoke) -.
	// (1) commitment_signed is delivered    ->
	//                                       .- send (3) RAA (awaiting remote revoke)
	// (2) RAA is delivered                  ->
	//                                       .- send (4) commitment_signed
	//                                       <- (3) RAA is delivered
	// send (5) commitment_signed            -.
	//                                       <- (4) commitment_signed is delivered
	// send (6) RAA                          -.
	// (5) commitment_signed is delivered    ->
	//                                       <- RAA
	// (6) RAA is delivered                  ->

	// First nodes[0] generates an update_fee
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] { // (1)
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	// ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000);
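	// send_payment_with_route now takes a RecipientOnionFields; a normal invoice-based
	// payment only needs the payment secret, hence secret_only() below.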
	nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[1], 1);

	let payment_event = {
		let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events_1.len(), 1);
		SendEvent::from_event(events_1.remove(0))
	};
	assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
	assert_eq!(payment_event.msgs.len(), 1);

	// ...now when the messages get delivered everyone should be happy
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
	let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	// deliver(1), generate (3):
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[1], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack); // deliver (2)
	let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(bs_update.update_add_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fail_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fee.is_none()); // (4)
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); // deliver (3)
	let as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	assert!(as_update.update_add_htlcs.is_empty()); // (5)
	assert!(as_update.update_fulfill_htlcs.is_empty()); // (5)
	assert!(as_update.update_fail_htlcs.is_empty()); // (5)
	assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5)
	assert!(as_update.update_fee.is_none()); // (5)
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed); // deliver (4)
	let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// only (6) so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.commitment_signed); // deliver (5)
	let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
	check_added_monitors!(nodes[0], 1);

	let events_2 = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events_2.len(), 1);
	match events_2[0] {
		Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment
		_ => panic!("Unexpected event"),
	}

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke); // deliver (6)
	check_added_monitors!(nodes[1], 1);
}

#[test]
fn test_update_fee_unordered_raa() {
	// Just the intro to the previous test followed by an out-of-order RAA (which caused a
	// crash in an earlier version of the update_fee patch)
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &[&nodes[1]], 8000000);

	// First nodes[0] generates an update_fee
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let update_msg = match events_0[0] { // (1)
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
			update_fee.as_ref()
		},
		_ => panic!("Unexpected event"),
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	// ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000);
	nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[1], 1);

	let payment_event = {
		let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events_1.len(), 1);
		SendEvent::from_event(events_1.remove(0))
	};
	assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
	assert_eq!(payment_event.msgs.len(), 1);

	// ...now when the messages get delivered everyone should be happy
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
	let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg); // deliver (2)
	check_added_monitors!(nodes[1], 1);

	// We can't continue, sadly, because our (1) now has a bogus signature
}

#[test]
fn test_multi_flight_update_fee() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// A                                        B
	// update_fee/commitment_signed          ->
	//                                       .- send (1) RAA and (2) commitment_signed
	// update_fee (never committed)          ->
	// (3) update_fee                        ->
	// We have to manually generate the above update_fee; it is allowed by the protocol, but we
	// don't track which updates correspond to which revoke_and_ack responses, so we're in
	// AwaitingRAA mode and will not generate the update_fee yet.
	//                                       <- (1) RAA delivered
	// (3) is generated and send (4) CS      -.
	// Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't
	// know the per_commitment_point to use for it.
	//                                       <- (2) commitment_signed delivered
	// revoke_and_ack                        ->
	//                                          B should send no response here
	// (4) commitment_signed delivered       ->
	//                                       <- RAA/commitment_signed delivered
	// revoke_and_ack                        ->

	// First nodes[0] generates an update_fee
	let initial_feerate;
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		initial_feerate = *feerate_lock;
		*feerate_lock = initial_feerate + 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1)
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
			(update_fee.as_ref().unwrap(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};

	// Deliver first update_fee/commitment_signed pair, generating (1) and (2):
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1);
	let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	// nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment
	// transaction:
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock = initial_feerate + 40;
	}
	nodes[0].node.timer_tick_occurred();
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// Create the (3) update_fee message that nodes[0] will generate before it does...
	let mut update_msg_2 = msgs::UpdateFee {
		channel_id: update_msg_1.channel_id.clone(),
		feerate_per_kw: (initial_feerate + 30) as u32,
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);

	update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32;
	// Deliver (3)
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);

	// Deliver (1), generating (3) and (4)
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg);
	let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	check_added_monitors!(nodes[0], 1);
	assert!(as_second_update.update_add_htlcs.is_empty());
	assert!(as_second_update.update_fulfill_htlcs.is_empty());
	assert!(as_second_update.update_fail_htlcs.is_empty());
	assert!(as_second_update.update_fail_malformed_htlcs.is_empty());
	// Check that the update_fee newly generated matches what we delivered:
	assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id);
	assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw);

	// Deliver (2) commitment_signed
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
	let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	check_added_monitors!(nodes[0], 1);
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	// Deliver (4)
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
	let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment);
	let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);
}

fn do_test_sanity_on_in_flight_opens(steps: u8) {
	// Previously, we had issues deserializing channels when we hadn't connected the first block
	// after creation. To catch that and similar issues, we lean on the Node::drop impl to test
	// serialization round-trips and simply do steps towards opening a channel and then drop the
	// Node objects.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	if steps & 0b1000_0000 != 0 {
		let block = Block {
			header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
			txdata: vec![],
		};
		connect_block(&nodes[0], &block);
		connect_block(&nodes[1], &block);
	}

	if steps & 0x0f == 0 { return; }
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
	let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	if steps & 0x0f == 1 { return; }
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
	let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());

	if steps & 0x0f == 2 { return; }
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);

	let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);

	if steps & 0x0f == 3 { return; }
	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
	check_added_monitors!(nodes[0], 0);
	let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());

	if steps & 0x0f == 4 { return; }
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
	{
		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 1);
		assert_eq!(added_monitors[0].0, funding_output);
		added_monitors.clear();
	}
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());

	if steps & 0x0f == 5 { return; }
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
	{
		let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 1);
		assert_eq!(added_monitors[0].0, funding_output);
		added_monitors.clear();
	}

	expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
	let events_4 = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events_4.len(), 0);

	if steps & 0x0f == 6 { return; }
	create_chan_between_nodes_with_value_confirm_first(&nodes[0], &nodes[1], &tx, 2);

	if steps & 0x0f == 7 { return; }
	confirm_transaction_at(&nodes[0], &tx, 2);
	connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
	create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
	expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
}

#[test]
fn test_sanity_on_in_flight_opens() {
	do_test_sanity_on_in_flight_opens(0);
	do_test_sanity_on_in_flight_opens(0 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(1);
	do_test_sanity_on_in_flight_opens(1 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(2);
	do_test_sanity_on_in_flight_opens(2 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(3);
	do_test_sanity_on_in_flight_opens(3 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(4);
	do_test_sanity_on_in_flight_opens(4 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(5);
	do_test_sanity_on_in_flight_opens(5 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(6);
	do_test_sanity_on_in_flight_opens(6 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(7);
	do_test_sanity_on_in_flight_opens(7 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(8);
	do_test_sanity_on_in_flight_opens(8 | 0b1000_0000);
}

#[test]
fn test_update_fee_vanilla() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 25;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs: _, update_fulfill_htlcs: _, update_fail_htlcs: _, update_fail_malformed_htlcs: _, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
	let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);
}

#[test]
fn test_update_fee_that_funder_cannot_afford() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_value = 5000;
	let push_sats = 700;
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000);
	let channel_id = chan.2;
	let secp_ctx = Secp256k1::new();
	let default_config = UserConfig::default();
	let bs_channel_reserve_sats = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);

	let opt_anchors = false;

	// Calculate the maximum feerate that A can afford. Note that we won't send an update_fee
	// unless we can still afford CONCURRENT_INBOUND_HTLC_FEE_BUFFER additional HTLCs' worth of
	// fee before actually running out of local balance, so we calculate two different feerates
	// here - the expected local limit as well as the expected remote limit.
	let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(opt_anchors) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32;
	let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(opt_anchors)) as u32;
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock = feerate;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);
	let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg.update_fee.unwrap());

	commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false);

	// Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate set above.
	{
		let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone();

		// We made sure neither party's funds are below the dust limit and there are no HTLCs here
		assert_eq!(commitment_tx.output.len(), 2);
		let total_fee: u64 = commit_tx_fee_msat(feerate, 0, opt_anchors) / 1000;
		let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value);
		actual_fee = channel_value - actual_fee;
		assert_eq!(total_fee, actual_fee);
	}

	{
		// Increment the feerate by a small constant, accounting for rounding errors
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 4;
	}
	nodes[0].node.timer_tick_occurred();
	nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
	check_added_monitors!(nodes[0], 0);

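	// Commitment numbers count down from 2**48 - 1 per BOLT 3; 281474976710654 is 2**48 - 2.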
	const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;

	// Get the EnforcingSigner for each channel, which will be used to (1) get the keys
	// needed to sign the new commitment tx and (2) sign the new commitment tx.
	let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
		let chan_signer = local_chan.get_signer();
		let pubkeys = chan_signer.pubkeys();
		(pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
		 pubkeys.funding_pubkey)
	};
	let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
		let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
		let chan_signer = remote_chan.get_signer();
		let pubkeys = chan_signer.pubkeys();
		(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
		 chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
		 pubkeys.funding_pubkey)
	};

	// Assemble the set of keys we can use for signatures for our commitment_signed message.
	let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
		&remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);

	let res = {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap();
		let local_chan_signer = local_chan.get_signer();
		let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![];
		let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
			INITIAL_COMMITMENT_NUMBER - 1,
			push_sats,
			channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, opt_anchors) / 1000,
			opt_anchors, local_funding, remote_funding,
			commit_tx_keys.clone(),
			non_buffer_feerate + 4,
			&mut htlcs,
			&local_chan.channel_transaction_parameters.as_counterparty_broadcastable()
		);
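		// No HTLCs in this commitment, so there are no payment preimages to hand the
		// signer alongside the signature request (hence the Vec::new() below).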
		local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
	};

	let commit_signed_msg = msgs::CommitmentSigned {
		channel_id: chan.2,
		signature: res.0,
		htlc_signatures: res.1
	};

	let update_fee = msgs::UpdateFee {
		channel_id: chan.2,
		feerate_per_kw: non_buffer_feerate + 4,
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_fee);

	// While producing the commitment_signed response after handling the received update_fee,
	// the check that the funder (who sent the update_fee) can afford the new fee
	// (funder_balance >= fee + channel_reserve) should produce an error.
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
	check_added_monitors!(nodes[1], 1);
	check_closed_broadcast!(nodes[1], true);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") });
}

#[test]
fn test_update_fee_with_fundee_update_add_htlc() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &[&nodes[1]], 8000000);

	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs: _, update_fulfill_htlcs: _, update_fail_htlcs: _, update_fail_malformed_htlcs: _, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 800000);

	// nothing happens since node[1] is in AwaitingRemoteRevoke
	nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	{
		let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 0);
		added_monitors.clear();
	}
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	// node[1] has nothing to do

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
	let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
	check_added_monitors!(nodes[1], 1);
	// AwaitingRemoteRevoke ends here

	let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert_eq!(commitment_update.update_add_htlcs.len(), 1);
	assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0);
	assert_eq!(commitment_update.update_fail_htlcs.len(), 0);
	assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0);
	assert!(commitment_update.update_fee.is_none());

	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke);
	check_added_monitors!(nodes[0], 1);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	expect_pending_htlcs_forwardable!(nodes[0]);

	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentClaimable { .. } => { },
		_ => panic!("Unexpected event"),
	};

	claim_payment(&nodes[1], &[&nodes[0]], our_payment_preimage);

	send_payment(&nodes[1], &[&nodes[0]], 800000);
	send_payment(&nodes[0], &[&nodes[1]], 800000);
	close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
}

#[test]
fn test_update_fee() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
	let channel_id = chan.2;

	// A                                        B
	// (1) update_fee/commitment_signed      ->
	//                                       <- (2) revoke_and_ack
	//                                       .- send (3) commitment_signed
	// (4) update_fee/commitment_signed      ->
	//                                       .- send (5) revoke_and_ack (no CS as we're awaiting a revoke)
	//                                       <- (3) commitment_signed delivered
	// send (6) revoke_and_ack               -.
	//                                       <- (5) deliver revoke_and_ack
	// (6) deliver revoke_and_ack            ->
	//                                       .- send (7) commitment_signed in response to (4)
	//                                       <- (7) deliver commitment_signed
	// revoke_and_ack                        ->

	// Create and deliver (1)...
	let feerate;
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		feerate = *feerate_lock;
		*feerate_lock = feerate + 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs: _, update_fulfill_htlcs: _, update_fail_htlcs: _, update_fail_malformed_htlcs: _, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	// Generate (2) and (3):
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	// Deliver (2):
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	// Create and deliver (4)...
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock = feerate + 30;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);
	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs: _, update_fulfill_htlcs: _, update_fail_htlcs: _, update_fail_malformed_htlcs: _, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	check_added_monitors!(nodes[1], 1);
	// ... creating (5)
	let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	// Handle (3), creating (6):
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0);
	check_added_monitors!(nodes[0], 1);
	let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	// Deliver (5):
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	// Deliver (6), creating (7):
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0);
	let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(commitment_update.update_add_htlcs.is_empty());
	assert!(commitment_update.update_fulfill_htlcs.is_empty());
	assert!(commitment_update.update_fail_htlcs.is_empty());
	assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
	assert!(commitment_update.update_fee.is_none());
	check_added_monitors!(nodes[1], 1);

	// Deliver (7)
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30);
	assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30);
	close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
}
976
977 #[test]
978 fn fake_network_test() {
979         // Simple test which builds a network of ChannelManagers, connects them to each other, and
980         // tests that payments get routed and transactions broadcast in semi-reasonable ways.
981         let chanmon_cfgs = create_chanmon_cfgs(4);
982         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
983         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
984         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
985
986         // Create some initial channels
987         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
988         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
989         let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
990
991         // Rebalance the network a bit by relaying a few payments through all the channels...
992         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
993         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
994         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
995         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
996
997         // Send some more payments
998         send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000);
999         send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000);
1000         send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000);
1001
1002         // Test failure packets
1003         let payment_hash_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 1000000).1;
1004         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1);
1005
1006         // Add a new channel that skips 3
1007         let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3);
1008
1009         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
1010         send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
1011         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1012         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1013         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1014         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1015         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1016
1017         // Do some rebalance loop payments, simultaneously
1018         let mut hops = Vec::with_capacity(3);
1019         hops.push(RouteHop {
1020                 pubkey: nodes[2].node.get_our_node_id(),
1021                 node_features: NodeFeatures::empty(),
1022                 short_channel_id: chan_2.0.contents.short_channel_id,
1023                 channel_features: ChannelFeatures::empty(),
1024                 fee_msat: 0,
1025                 cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32
1026         });
1027         hops.push(RouteHop {
1028                 pubkey: nodes[3].node.get_our_node_id(),
1029                 node_features: NodeFeatures::empty(),
1030                 short_channel_id: chan_3.0.contents.short_channel_id,
1031                 channel_features: ChannelFeatures::empty(),
1032                 fee_msat: 0,
1033                 cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32
1034         });
1035         hops.push(RouteHop {
1036                 pubkey: nodes[1].node.get_our_node_id(),
1037                 node_features: nodes[1].node.node_features(),
1038                 short_channel_id: chan_4.0.contents.short_channel_id,
1039                 channel_features: nodes[1].node.channel_features(),
1040                 fee_msat: 1000000,
1041                 cltv_expiry_delta: TEST_FINAL_CLTV,
1042         });
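        // When assembling a route by hand, each hop's fee_msat is what that hop keeps: the next
        // channel's advertised base fee plus its proportional fee (parts-per-million) applied to
        // the amount forwarded onward. Likewise, each hop's cltv_expiry_delta is the delta the
        // *next* channel requires, with the final hop using TEST_FINAL_CLTV.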
1043         hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
1044         hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
1045         let payment_preimage_1 = send_along_route(&nodes[1], Route { paths: vec![hops], payment_params: None }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
1046
1047         let mut hops = Vec::with_capacity(3);
1048         hops.push(RouteHop {
1049                 pubkey: nodes[3].node.get_our_node_id(),
1050                 node_features: NodeFeatures::empty(),
1051                 short_channel_id: chan_4.0.contents.short_channel_id,
1052                 channel_features: ChannelFeatures::empty(),
1053                 fee_msat: 0,
1054                 cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32
1055         });
1056         hops.push(RouteHop {
1057                 pubkey: nodes[2].node.get_our_node_id(),
1058                 node_features: NodeFeatures::empty(),
1059                 short_channel_id: chan_3.0.contents.short_channel_id,
1060                 channel_features: ChannelFeatures::empty(),
1061                 fee_msat: 0,
1062                 cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32
1063         });
1064         hops.push(RouteHop {
1065                 pubkey: nodes[1].node.get_our_node_id(),
1066                 node_features: nodes[1].node.node_features(),
1067                 short_channel_id: chan_2.0.contents.short_channel_id,
1068                 channel_features: nodes[1].node.channel_features(),
1069                 fee_msat: 1000000,
1070                 cltv_expiry_delta: TEST_FINAL_CLTV,
1071         });
1072         hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
1073         hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
1074         let payment_hash_2 = send_along_route(&nodes[1], Route { paths: vec![hops], payment_params: None }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;
1075
1076         // Claim the rebalances...
1077         fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
1078         claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
1079
1080         // Close down the channels...
1081         close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
1082         check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
1083         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
1084         close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
1085         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
1086         check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
1087         close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
1088         check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
1089         check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
1090         close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
1091         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
1092         check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
1093 }
1094
1095 #[test]
1096 fn holding_cell_htlc_counting() {
1097         // Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs
1098         // to ensure we don't end up with HTLCs sitting around in our holding cell for several
1099         // commitment dance rounds.
1100         let chanmon_cfgs = create_chanmon_cfgs(3);
1101         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1102         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1103         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1104         create_announced_chan_between_nodes(&nodes, 0, 1);
1105         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
1106
1107         let mut payments = Vec::new();
1108         for _ in 0..crate::ln::channel::OUR_MAX_HTLCS {
1109                 let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
1110                 nodes[1].node.send_payment_with_route(&route, payment_hash,
1111                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
1112                 payments.push((payment_preimage, payment_hash));
1113         }
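        // Only the first send produced a commitment update (and thus a monitor update); the
        // remaining HTLCs were queued in the holding cell.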
1114         check_added_monitors!(nodes[1], 1);
1115
1116         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
1117         assert_eq!(events.len(), 1);
1118         let initial_payment_event = SendEvent::from_event(events.pop().unwrap());
1119         assert_eq!(initial_payment_event.node_id, nodes[2].node.get_our_node_id());
1120
1121         // There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs in
1122         // the holding cell, waiting on nodes[2]'s RAA before they can be sent. At this point we should not be able to add
1123         // another HTLC.
1124         let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
1125         {
1126                 unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, payment_hash_1,
1127                                 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
1128                         ), true, APIError::ChannelUnavailable { ref err },
1129                         assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err)));
1130                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1131                 nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot push more than their max accepted HTLCs", 1);
1132         }
1133
1134         // This should also be true if we try to forward a payment.
1135         let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
1136         {
1137                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1138                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1139                 check_added_monitors!(nodes[0], 1);
1140         }
1141
1142         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1143         assert_eq!(events.len(), 1);
1144         let payment_event = SendEvent::from_event(events.pop().unwrap());
1145         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1146
1147         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1148         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
1149         // We have to forward pending HTLCs twice - once to attempt to forward the payment (which
1150         // fails), then a second time to process the resulting failure and fail the HTLC backwards.
1151         expect_pending_htlcs_forwardable!(nodes[1]);
1152         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
1153         check_added_monitors!(nodes[1], 1);
1154
1155         let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1156         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]);
1157         commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true);
1158
1159         expect_payment_failed_with_update!(nodes[0], payment_hash_2, false, chan_2.0.contents.short_channel_id, false);
1160
1161         // Now forward all the pending HTLCs and claim them back
1162         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &initial_payment_event.msgs[0]);
1163         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &initial_payment_event.commitment_msg);
1164         check_added_monitors!(nodes[2], 1);
1165
1166         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1167         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
1168         check_added_monitors!(nodes[1], 1);
1169         let as_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1170
1171         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
1172         check_added_monitors!(nodes[1], 1);
1173         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1174
1175         for update in as_updates.update_add_htlcs.iter() {
1176                 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update);
1177         }
1178         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_updates.commitment_signed);
1179         check_added_monitors!(nodes[2], 1);
1180         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
1181         check_added_monitors!(nodes[2], 1);
1182         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1183
1184         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
1185         check_added_monitors!(nodes[1], 1);
1186         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
1187         check_added_monitors!(nodes[1], 1);
1188         let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1189
1190         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_final_raa);
1191         check_added_monitors!(nodes[2], 1);
1192
1193         expect_pending_htlcs_forwardable!(nodes[2]);
1194
1195         let events = nodes[2].node.get_and_clear_pending_events();
1196         assert_eq!(events.len(), payments.len());
1197         for (event, &(_, ref hash)) in events.iter().zip(payments.iter()) {
1198                 match event {
1199                         &Event::PaymentClaimable { ref payment_hash, .. } => {
1200                                 assert_eq!(*payment_hash, *hash);
1201                         },
1202                         _ => panic!("Unexpected event"),
1203                 };
1204         }
1205
1206         for (preimage, _) in payments.drain(..) {
1207                 claim_payment(&nodes[1], &[&nodes[2]], preimage);
1208         }
1209
1210         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
1211 }
1212
1213 #[test]
1214 fn duplicate_htlc_test() {
1215         // Test that we accept duplicate payment_hash HTLCs across the network and that
1216         // claiming/failing them are all separate and don't affect each other
1217         let chanmon_cfgs = create_chanmon_cfgs(6);
1218         let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
1219         let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
1220         let mut nodes = create_network(6, &node_cfgs, &node_chanmgrs);
1221
1222         // Create some initial channels to route via 3 to 4/5 from 0/1/2
1223         create_announced_chan_between_nodes(&nodes, 0, 3);
1224         create_announced_chan_between_nodes(&nodes, 1, 3);
1225         create_announced_chan_between_nodes(&nodes, 2, 3);
1226         create_announced_chan_between_nodes(&nodes, 3, 4);
1227         create_announced_chan_between_nodes(&nodes, 3, 5);
1228
1229         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
1230
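        // The test harness derives payment preimages from a shared counter, so winding the
        // counter back makes the next route_payment reuse the same preimage and payment hash.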
1231         *nodes[0].network_payment_count.borrow_mut() -= 1;
1232         assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage);
1233
1234         *nodes[0].network_payment_count.borrow_mut() -= 1;
1235         assert_eq!(route_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], 1000000).0, payment_preimage);
1236
1237         claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage);
1238         fail_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], payment_hash);
1239         claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage);
1240 }
1241
1242 #[test]
1243 fn test_duplicate_htlc_different_direction_onchain() {
1244         // Test that ChannelMonitor doesn't generate 2 preimage txn
1245         // when we have 2 HTLCs with same preimage that go across a node
1246         // in opposite directions, even with the same payment secret.
1247         let chanmon_cfgs = create_chanmon_cfgs(2);
1248         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1249         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1250         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1251
1252         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1253
1254         // balancing
1255         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
1256
1257         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);
1258
1259         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], 800_000);
1260         let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
1261         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], 800_000, payment_hash, node_a_payment_secret);
1262
1263         // Provide preimage to node 0 by claiming payment
1264         nodes[0].node.claim_funds(payment_preimage);
1265         expect_payment_claimed!(nodes[0], payment_hash, 800_000);
1266         check_added_monitors!(nodes[0], 1);
1267
1268         // Broadcast node 1 commitment txn
1269         let remote_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
1270
1271         assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound
1272         let mut has_both_htlcs = 0; // check htlcs match ones committed
1273         for outp in remote_txn[0].output.iter() {
1274                 if outp.value == 800_000 / 1000 {
1275                         has_both_htlcs += 1;
1276                 } else if outp.value == 900_000 / 1000 {
1277                         has_both_htlcs += 1;
1278                 }
1279         }
1280         assert_eq!(has_both_htlcs, 2);
1281
1282         mine_transaction(&nodes[0], &remote_txn[0]);
1283         check_added_monitors!(nodes[0], 1);
1284         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
1285         connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
1286
1287         let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
1288         assert_eq!(claim_txn.len(), 3);
1289
1290         check_spends!(claim_txn[0], remote_txn[0]); // Immediate HTLC claim with preimage
1291         check_spends!(claim_txn[1], remote_txn[0]);
1292         check_spends!(claim_txn[2], remote_txn[0]);
1293         let preimage_tx = &claim_txn[0];
1294         let (preimage_bump_tx, timeout_tx) = if claim_txn[1].input[0].previous_output == preimage_tx.input[0].previous_output {
1295                 (&claim_txn[1], &claim_txn[2])
1296         } else {
1297                 (&claim_txn[2], &claim_txn[1])
1298         };
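        // The preimage claim gets re-broadcast with a bumped fee as blocks connect, so two of
        // the three transactions spend the same HTLC output; we tell the bump apart from the
        // timeout claim by its previous output.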
1299
1300         assert_eq!(preimage_tx.input.len(), 1);
1301         assert_eq!(preimage_bump_tx.input.len(), 1);
1302
1304         assert_eq!(preimage_tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx
1305         assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value, 800);
1306
1307         assert_eq!(timeout_tx.input.len(), 1);
1308         assert_eq!(timeout_tx.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx
1309         check_spends!(timeout_tx, remote_txn[0]);
1310         assert_eq!(remote_txn[0].output[timeout_tx.input[0].previous_output.vout as usize].value, 900);
1311
1312         let events = nodes[0].node.get_and_clear_pending_msg_events();
1313         assert_eq!(events.len(), 3);
1314         for e in events {
1315                 match e {
1316                         MessageSendEvent::BroadcastChannelUpdate { .. } => {},
1317                         MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
1318                                 assert_eq!(node_id, nodes[1].node.get_our_node_id());
1319                                 assert_eq!(msg.data, "Channel closed because commitment or closing transaction was confirmed on chain.");
1320                         },
1321                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
1322                                 assert!(update_add_htlcs.is_empty());
1323                                 assert!(update_fail_htlcs.is_empty());
1324                                 assert_eq!(update_fulfill_htlcs.len(), 1);
1325                                 assert!(update_fail_malformed_htlcs.is_empty());
1326                                 assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
1327                         },
1328                         _ => panic!("Unexpected event"),
1329                 }
1330         }
1331 }
1332
1333 #[test]
1334 fn test_basic_channel_reserve() {
1335         let chanmon_cfgs = create_chanmon_cfgs(2);
1336         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1337         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1338         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1339         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1340
1341         let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
1342         let channel_reserve = chan_stat.channel_reserve_msat;
1343
1344         // The 2* and +1 are for the fee spike reserve.
1345         let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, get_opt_anchors!(nodes[0], nodes[1], chan.2));
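        // nodes[0] funded 100,000 sats and pushed 95,000,000 msat to nodes[1], leaving itself
        // 5,000,000 msat; anything above the reserve and commitment fee should be sendable.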
1346         let max_can_send = 5000000 - channel_reserve - commit_tx_fee;
1347         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send + 1);
1348         let err = nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1349                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).err().unwrap();
1350         match err {
1351                 PaymentSendFailure::AllFailedResendSafe(ref fails) => {
1352                         match &fails[0] {
1353                                 &APIError::ChannelUnavailable{ref err} =>
1354                                         assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)),
1355                                 _ => panic!("Unexpected error variant"),
1356                         }
1357                 },
1358                 _ => panic!("Unexpected error variant"),
1359         }
1360         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1361         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put our balance under counterparty-announced channel reserve value", 1);
1362
1363         send_payment(&nodes[0], &vec![&nodes[1]], max_can_send);
1364 }
1365
1366 #[test]
1367 fn test_fee_spike_violation_fails_htlc() {
1368         let chanmon_cfgs = create_chanmon_cfgs(2);
1369         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1370         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1371         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1372         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1373
1374         let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 3460001);
1375         // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1376         let secp_ctx = Secp256k1::new();
1377         let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!");
1378
1379         let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
1380
1381         let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
1382         let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 3460001, &Some(payment_secret), cur_height, &None).unwrap();
1383         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
1384         let msg = msgs::UpdateAddHTLC {
1385                 channel_id: chan.2,
1386                 htlc_id: 0,
1387                 amount_msat: htlc_msat,
1388                 payment_hash: payment_hash,
1389                 cltv_expiry: htlc_cltv,
1390                 onion_routing_packet: onion_packet,
1391         };
1392
1393         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
1394
1395         // Now manually create the commitment_signed message corresponding to the update_add
1396         // nodes[0] just sent. In the code for construction of this message, "local" refers
1397         // to the sender of the message, and "remote" refers to the receiver.
1398
1399         let feerate_per_kw = get_feerate!(nodes[0], nodes[1], chan.2);
1400
1401         const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
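        // Per BOLT 3, commitment numbers are 48 bits, and LDK counts them *down* from 2^48 - 1,
        // so INITIAL_COMMITMENT_NUMBER - 1 below refers to the channel's second commitment
        // transaction.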
1402
1403         // Get the EnforcingSigner for each channel, which will be used to (1) get the keys
1404         // needed to sign the new commitment tx and (2) sign the new commitment tx.
1405         let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = {
1406                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
1407                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
1408                 let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
1409                 let chan_signer = local_chan.get_signer();
1410                 // Make the signer believe we validated another commitment, so we can release the secret
1411                 chan_signer.get_enforcement_state().last_holder_commitment -= 1;
1412
1413                 let pubkeys = chan_signer.pubkeys();
1414                 (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
1415                  chan_signer.release_commitment_secret(INITIAL_COMMITMENT_NUMBER),
1416                  chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx),
1417                  chan_signer.pubkeys().funding_pubkey)
1418         };
1419         let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
1420                 let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
1421                 let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
1422                 let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
1423                 let chan_signer = remote_chan.get_signer();
1424                 let pubkeys = chan_signer.pubkeys();
1425                 (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
1426                  chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
1427                  chan_signer.pubkeys().funding_pubkey)
1428         };
1429
1430         // Assemble the set of keys we can use for signatures for our commitment_signed message.
1431         let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
1432                 &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);
1433
1434         // Build the remote commitment transaction so we can sign it, and then later use the
1435         // signature for the commitment_signed message.
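        // nodes[0]'s balance on the new commitment: its 5,000,000 msat funder balance, less the
        // 3,460,001 msat HTLC, less the one-HTLC commitment fee (226,000 msat at the test
        // harness's default 253 sat/kW feerate), rounded down to 1,313 sats.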
1436         let local_chan_balance = 1313;
1437
1438         let accepted_htlc_info = chan_utils::HTLCOutputInCommitment {
1439                 offered: false,
1440                 amount_msat: 3460001,
1441                 cltv_expiry: htlc_cltv,
1442                 payment_hash,
1443                 transaction_output_index: Some(1),
1444         };
1445
1446         let commitment_number = INITIAL_COMMITMENT_NUMBER - 1;
1447
1448         let res = {
1449                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
1450                 let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
1451                 let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap();
1452                 let local_chan_signer = local_chan.get_signer();
1453                 let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
1454                         commitment_number,
1455                         95000,
1456                         local_chan_balance,
1457                         local_chan.opt_anchors(), local_funding, remote_funding,
1458                         commit_tx_keys.clone(),
1459                         feerate_per_kw,
1460                         &mut vec![(accepted_htlc_info, ())],
1461                         &local_chan.channel_transaction_parameters.as_counterparty_broadcastable()
1462                 );
1463                 local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
1464         };
1465
1466         let commit_signed_msg = msgs::CommitmentSigned {
1467                 channel_id: chan.2,
1468                 signature: res.0,
1469                 htlc_signatures: res.1
1470         };
1471
1472         // Send the commitment_signed message to nodes[1].
1473         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
1474         let _ = nodes[1].node.get_and_clear_pending_msg_events();
1475
1476         // Send the RAA to nodes[1].
1477         let raa_msg = msgs::RevokeAndACK {
1478                 channel_id: chan.2,
1479                 per_commitment_secret: local_secret,
1480                 next_per_commitment_point: next_local_point
1481         };
1482         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_msg);
1483
1484         let events = nodes[1].node.get_and_clear_pending_msg_events();
1485         assert_eq!(events.len(), 1);
1486         // Make sure the HTLC failed in the way we expect.
1487         match events[0] {
1488                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, .. } => {
1489                         assert_eq!(update_fail_htlcs.len(), 1);
1490                         update_fail_htlcs[0].clone()
1491                 },
1492                 _ => panic!("Unexpected event"),
1493         };
1494         nodes[1].logger.assert_log("lightning::ln::channel".to_string(),
1495                 format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", ::hex::encode(raa_msg.channel_id)), 1);
1496
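        // Two monitor updates: one for the commitment_signed we delivered and one for the
        // revoke_and_ack.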
1497         check_added_monitors!(nodes[1], 2);
1498 }
1499
1500 #[test]
1501 fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
1502         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1503         // Set up the channel so that the funder (nodes[0]) retains only its required channel
1504         // reserve plus the commitment fee for MIN_AFFORDABLE_HTLC_COUNT HTLCs. The funder pays
1505         // the commitment fee, so each above-dust HTLC the fundee sends eats into that buffer;
1506         // in this test we check that the fundee is prevented from sending one HTLC too many.
1507         let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1508         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1509         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1510         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1511         let default_config = UserConfig::default();
1512         let opt_anchors = false;
1513
1514         let mut push_amt = 100_000_000;
1515         push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
1516
1517         push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1518
1519         let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);
1520
1521         // Sending exactly enough to hit the reserve amount should be accepted
1522         for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
1523                 let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1524         }
1525
1526         // However one more HTLC should be significantly over the reserve amount and fail.
1527         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000);
1528         unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
1529                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1530                 ), true, APIError::ChannelUnavailable { ref err },
1531                 assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value"));
1532         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1533         nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put counterparty balance under holder-announced channel reserve value".to_string(), 1);
1534 }
1535
1536 #[test]
1537 fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
1538         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1539         let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1540         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1541         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1542         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1543         let default_config = UserConfig::default();
1544         let opt_anchors = false;
1545
1546         // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
1547         // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
1548         // transaction fee with 0 HTLCs (183 sats)).
1549         let mut push_amt = 100_000_000;
1550         push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
1551         push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1552         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);
1553
1554         // Send four HTLCs to cover the initial push_msat buffer we're required to include
1555         for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
1556                 let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1557         }
1558
1559         let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 700_000);
1560         // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1561         let secp_ctx = Secp256k1::new();
1562         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
1563         let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
1564         let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
1565         let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 700_000, &Some(payment_secret), cur_height, &None).unwrap();
1566         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
1567         let msg = msgs::UpdateAddHTLC {
1568                 channel_id: chan.2,
1569                 htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64,
1570                 amount_msat: htlc_msat,
1571                 payment_hash: payment_hash,
1572                 cltv_expiry: htlc_cltv,
1573                 onion_routing_packet: onion_packet,
1574         };
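        // The MIN_AFFORDABLE_HTLC_COUNT payments routed above consumed HTLC IDs 0 through
        // MIN_AFFORDABLE_HTLC_COUNT - 1, so this hand-crafted HTLC must carry the next ID in
        // sequence.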
1575
1576         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
1577         // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
1578         nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
1579         assert_eq!(nodes[0].node.list_channels().len(), 0);
1580         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
1581         assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
1582         check_added_monitors!(nodes[0], 1);
1583         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() });
1584 }
1585
1586 #[test]
1587 fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
1588         // Test that if we receive many dust HTLCs over an outbound channel, they don't count when
1589         // calculating our commitment transaction fee (this was previously broken).
1590         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1591         let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1592
1593         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1594         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1595         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1596         let default_config = UserConfig::default();
1597         let opt_anchors = false;
1598
1599         // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
1600         // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
1601         // transaction fee with 0 HTLCs (183 sats)).
1602         let mut push_amt = 100_000_000;
1603         push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
1604         push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1605         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt);
1606
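        // A received HTLC is dust on the receiver's commitment transaction when its value, less
        // the fee for its HTLC-success transaction, falls below the dust limit; dust_amt is the
        // largest value that still counts as dust.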
1607         let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000
1608                 + feerate_per_kw as u64 * htlc_success_tx_weight(opt_anchors) / 1000 * 1000 - 1;
1609         // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
1610         // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
1611         // commitment transaction fee.
1612         let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], dust_amt);
1613
1614         // Send four HTLCs to cover the initial push_msat buffer we're required to include
1615         for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
1616                 let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1617         }
1618
1619         // One more than the dust amt should fail, however.
1620         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt + 1);
1621         unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
1622                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1623                 ), true, APIError::ChannelUnavailable { ref err },
1624                 assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value"));
1625 }
1626
1627 #[test]
1628 fn test_chan_init_feerate_unaffordability() {
1629         // Test that we will reject channel opens which do not leave enough to pay for any HTLCs due to
1630         // channel reserve and feerate requirements.
1631         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1632         let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1633         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1634         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1635         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1636         let default_config = UserConfig::default();
1637         let opt_anchors = false;
1638
1639         // Set the push_msat amount such that nodes[0] will not be able to afford to add even a single
1640         // HTLC.
1641         let mut push_amt = 100_000_000;
1642         push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
1643         assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None).unwrap_err(),
1644                 APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() });
1645
1646         // During open, we don't have a "counterparty channel reserve" to check against, so that
1647         // requirement only comes into play on the open_channel handling side.
1648         push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1649         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None).unwrap();
1650         let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
1651         open_channel_msg.push_msat += 1;
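        // Pushing one extra msat leaves nodes[0] with less than the reserve nodes[1] requires,
        // so nodes[1] must reject the open_channel.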
1652         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
1653
1654         let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
1655         assert_eq!(msg_events.len(), 1);
1656         match msg_events[0] {
1657                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
1658                         assert_eq!(msg.data, "Insufficient funding amount for initial reserve");
1659                 },
1660                 _ => panic!("Unexpected event"),
1661         }
1662 }
1663
1664 #[test]
1665 fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
1666         // Test that if we receive many dust HTLCs over an inbound channel, they don't count when
1667         // calculating our counterparty's commitment transaction fee (this was previously broken).
1668         let chanmon_cfgs = create_chanmon_cfgs(2);
1669         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1670         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1671         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1672         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000);
1673
1674         let payment_amt = 46000; // Dust amount
1675         // In the previous code, these first four payments would succeed.
1676         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1677         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1678         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1679         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1680
1681         // Then these next 5 would be interpreted by nodes[1] as violating the fee spike buffer.
1682         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1683         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1684         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1685         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1686         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1687
1688         // And this last payment previously resulted in nodes[1] closing on its inbound-channel
1689         // counterparty, because it counted all the previous dust HTLCs against nodes[0]'s commitment
1690         // transaction fee and therefore perceived this next payment as a channel reserve violation.
1691         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1692 }
1693
1694 #[test]
1695 fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
1696         let chanmon_cfgs = create_chanmon_cfgs(3);
1697         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1698         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1699         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1700         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1701         let _ = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);
1702
1703         let feemsat = 239;
1704         let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
1705         let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
1706         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
1707         let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2);
1708
1709         // The 2* and +1 are for the fee spike reserve.
1710         let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors);
1711         let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc)/2;
1712         let amt_msat_1 = recv_value_1 + total_routing_fee_msat;
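        // Sending half the spendable balance first leaves room to attempt a second HTLC that
        // lands exactly one msat over what the reserve allows.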
1713
1714         // Add a pending HTLC.
1715         let (route_1, our_payment_hash_1, _, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_1);
1716         let payment_event_1 = {
1717                 nodes[0].node.send_payment_with_route(&route_1, our_payment_hash_1,
1718                         RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
1719                 check_added_monitors!(nodes[0], 1);
1720
1721                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1722                 assert_eq!(events.len(), 1);
1723                 SendEvent::from_event(events.remove(0))
1724         };
1725         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
1726
1727         // Attempt to trigger a channel reserve violation --> payment failure.
1728         let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, opt_anchors);
1729         let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1;
1730         let amt_msat_2 = recv_value_2 + total_routing_fee_msat;
1731         let (route_2, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_2);
1732
1733         // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1734         let secp_ctx = Secp256k1::new();
1735         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
1736         let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
1737         let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap();
1738         let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route_2.paths[0], recv_value_2, &None, cur_height, &None).unwrap();
1739         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1);
1740         let msg = msgs::UpdateAddHTLC {
1741                 channel_id: chan.2,
1742                 htlc_id: 1,
1743                 amount_msat: htlc_msat + 1,
1744                 payment_hash: our_payment_hash_1,
1745                 cltv_expiry: htlc_cltv,
1746                 onion_routing_packet: onion_packet,
1747         };
1748
1749         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
1750         // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
1751         nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote HTLC add would put them under remote reserve value".to_string(), 1);
1752         assert_eq!(nodes[1].node.list_channels().len(), 1);
1753         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
1754         assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
1755         check_added_monitors!(nodes[1], 1);
1756         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() });
1757 }
1758
1759 #[test]
1760 fn test_inbound_outbound_capacity_is_not_zero() {
1761         let chanmon_cfgs = create_chanmon_cfgs(2);
1762         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1763         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1764         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1765         let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1766         let channels0 = node_chanmgrs[0].list_channels();
1767         let channels1 = node_chanmgrs[1].list_channels();
1768         let default_config = UserConfig::default();
1769         assert_eq!(channels0.len(), 1);
1770         assert_eq!(channels1.len(), 1);
1771
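        // Capacity in each direction is the owning side's balance minus the reserve that side
        // must maintain, so reserved funds never show up as spendable or receivable.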
1772         let reserve = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config);
1773         assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve*1000);
1774         assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve*1000);
1775
1776         assert_eq!(channels0[0].outbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
1777         assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
1778 }
1779
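// Computes the commitment transaction fee, in msat, that the funder pays at a given feerate
// with a given number of non-dust HTLCs: weight * feerate / 1000, floored to whole sats, then
// expressed in msat. E.g. at the test default of 253 sat/kW with no anchors, a one-HTLC
// commitment weighs 724 + 172 = 896 weight units, costing 896 * 253 / 1000 = 226 sats.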
1780 fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, opt_anchors: bool) -> u64 {
1781         (commitment_tx_base_weight(opt_anchors) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
1782 }
1783
1784 #[test]
1785 fn test_channel_reserve_holding_cell_htlcs() {
1786         let chanmon_cfgs = create_chanmon_cfgs(3);
1787         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1788         // When this test was written, the default base fee floated based on the HTLC count.
1789         // It is now fixed, so we simply set the fee to the expected value here.
1790         let mut config = test_default_channel_config();
1791         config.channel_config.forwarding_fee_base_msat = 239;
1792         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
1793         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1794         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001);
1795         let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 190000, 1001);
1796
1797         let mut stat01 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
1798         let mut stat11 = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
1799
1800         let mut stat12 = get_channel_value_stat!(nodes[1], nodes[2], chan_2.2);
1801         let mut stat22 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2);
1802
1803         macro_rules! expect_forward {
1804                 ($node: expr) => {{
1805                         let mut events = $node.node.get_and_clear_pending_msg_events();
1806                         assert_eq!(events.len(), 1);
1807                         check_added_monitors!($node, 1);
1808                         let payment_event = SendEvent::from_event(events.remove(0));
1809                         payment_event
1810                 }}
1811         }
1812
1813         let feemsat = 239; // set above
1814         let total_fee_msat = (nodes.len() - 2) as u64 * feemsat;
1815         let feerate = get_feerate!(nodes[0], nodes[1], chan_1.2);
1816         let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan_1.2);
1817
1818         let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat;
1819
1820         // attempt to send amt_msat > their_max_htlc_value_in_flight_msat
1821         {
1822                 let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
1823                         .with_features(nodes[2].node.invoice_features()).with_max_channel_saturation_power_of_half(0);
1824                 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0, TEST_FINAL_CLTV);
1825                 route.paths[0].last_mut().unwrap().fee_msat += 1;
1826                 assert!(route.paths[0].iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
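                // Bumping the final hop's fee_msat pushes the receive amount one msat over
                // counterparty_max_htlc_value_in_flight_msat while leaving the intermediate
                // hops' fees untouched, as the assertion above checks.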
1827
1828                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1829                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1830                         ), true, APIError::ChannelUnavailable { ref err },
1831                         assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
1832                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1833                 nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put us over the max HTLC value in flight our peer will accept", 1);
1834         }
1835
1836         // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete
1837         // nodes[0]'s wealth
1838         loop {
1839                 let amt_msat = recv_value_0 + total_fee_msat;
1840                 // 3 for the 3 HTLCs that will be sent; the + 1 models the fee spike reserve's extra
1841                 // HTLC and the 2* its fee multiple. Also, ensure that each payment is large enough to
1842                 // be over the dust limit so it counts towards each commit tx fee calculation.
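                // (The 2* mirrors FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, the multiple by which the
                // sender inflates its projected commitment tx fee before adding an HTLC.)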
1843                 let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, opt_anchors);
1844                 let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000);
1845                 if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat {
1846                         break;
1847                 }
1848
1849                 let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
1850                         .with_features(nodes[2].node.invoice_features()).with_max_channel_saturation_power_of_half(0);
1851                 let route = get_route!(nodes[0], payment_params, recv_value_0, TEST_FINAL_CLTV).unwrap();
1852                 let (payment_preimage, ..) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0);
1853                 claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
1854
1855                 let (stat01_, stat11_, stat12_, stat22_) = (
1856                         get_channel_value_stat!(nodes[0], nodes[1], chan_1.2),
1857                         get_channel_value_stat!(nodes[1], nodes[0], chan_1.2),
1858                         get_channel_value_stat!(nodes[1], nodes[2], chan_2.2),
1859                         get_channel_value_stat!(nodes[2], nodes[1], chan_2.2),
1860                 );
1861
1862                 assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
1863                 assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
1864                 assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
1865                 assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
1866                 stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
1867         }
1868
1869         // Now add pending outputs.
1870         // As above, the 2* and + 1 on the commit tx fee account for the fee spike reserve.
1871         // The reason we're dividing by two here is as follows: the dividend is the total outbound liquidity
1872         // after fees, the channel reserve, and the fee spike buffer are removed. We eventually want to
1873         // divide this quantity into 3 portions, each of which will be sent in an HTLC. This allows us
1874         // to test channel reserve policy at the edges of what amount is sendable, i.e.
1875         // cases where 1 msat over X amount will cause a payment failure, but anything less than
1876         // that can be sent successfully. So, dividing by two is a somewhat arbitrary way of getting
1877         // the amount of the first of these three payments. The reason we split into 3 payments
1878         // is to test the behavior of the holding cell with respect to channel reserve and commit tx
1879         // fee policy.
1880         let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors);
1881         let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2;
1882         let amt_msat_1 = recv_value_1 + total_fee_msat;
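        // recv_value_1 deliberately takes half of the spendable balance left after removing the
        // reserve and fee buffer, leaving room below to probe the remainder 1 msat at a time.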
1883
1884         let (route_1, our_payment_hash_1, our_payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_1);
1885         let payment_event_1 = {
1886                 nodes[0].node.send_payment_with_route(&route_1, our_payment_hash_1,
1887                         RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
1888                 check_added_monitors!(nodes[0], 1);
1889
1890                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1891                 assert_eq!(events.len(), 1);
1892                 SendEvent::from_event(events.remove(0))
1893         };
1894         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
1895
1896         // channel reserve test with htlc pending output > 0
1897         let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs;
1898         {
1899                 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_2 + 1);
1900                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1901                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1902                         ), true, APIError::ChannelUnavailable { ref err },
1903                         assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)));
1904                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1905         }
1906
1907         // split the rest to test holding cell
1908         let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, opt_anchors);
1909         let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs;
1910         let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2;
1911         let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat;
1912         {
1913                 let stat = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
1914                 assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat + commit_tx_fee_3_htlcs), stat.channel_reserve_msat);
1915         }
1916
1917         // now see if they go through on both sides
1918         let (route_21, our_payment_hash_21, our_payment_preimage_21, our_payment_secret_21) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_21);
1919         // but this one will get stuck in the holding cell
1920         nodes[0].node.send_payment_with_route(&route_21, our_payment_hash_21,
1921                 RecipientOnionFields::secret_only(our_payment_secret_21), PaymentId(our_payment_hash_21.0)).unwrap();
1922         check_added_monitors!(nodes[0], 0);
1923         let events = nodes[0].node.get_and_clear_pending_events();
1924         assert_eq!(events.len(), 0);
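        // No monitor update and no events: nodes[0] is still awaiting nodes[1]'s revoke_and_ack
        // for the first HTLC, so this add lands in the holding cell rather than a commitment tx.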
1925
1926         // test with outbound holding cell amount > 0
1927         {
1928                 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22+1);
1929                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1930                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1931                         ), true, APIError::ChannelUnavailable { ref err },
1932                         assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)));
1933                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1934                 nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put our balance under counterparty-announced channel reserve value", 2);
1935         }
1936
1937         let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
1938         // this one will also get stuck in the holding cell
1939         nodes[0].node.send_payment_with_route(&route_22, our_payment_hash_22,
1940                 RecipientOnionFields::secret_only(our_payment_secret_22), PaymentId(our_payment_hash_22.0)).unwrap();
1941         check_added_monitors!(nodes[0], 0);
1942         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
1943         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1944
1945         // flush the pending htlc
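        // nodes[1] responds to the commitment_signed with an RAA plus its own CS; handling that
        // RAA is what lets nodes[0] free its holding cell HTLCs into commitment_update_2 below.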
1946         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg);
1947         let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1948         check_added_monitors!(nodes[1], 1);
1949
1950         // the pending htlc should be promoted to committed
1951         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
1952         check_added_monitors!(nodes[0], 1);
1953         let commitment_update_2 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1954
1955         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed);
1956         let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1957         // No commitment_signed so get_event_msg's assert(len == 1) passes
1958         check_added_monitors!(nodes[0], 1);
1959
1960         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack);
1961         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1962         check_added_monitors!(nodes[1], 1);
1963
1964         expect_pending_htlcs_forwardable!(nodes[1]);
1965
1966         let ref payment_event_11 = expect_forward!(nodes[1]);
1967         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]);
1968         commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
1969
1970         expect_pending_htlcs_forwardable!(nodes[2]);
1971         expect_payment_claimable!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1);
1972
1973         // flush the htlcs in the holding cell
1974         assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
1975         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]);
1976         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]);
1977         commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
1978         expect_pending_htlcs_forwardable!(nodes[1]);
1979
1980         let ref payment_event_3 = expect_forward!(nodes[1]);
1981         assert_eq!(payment_event_3.msgs.len(), 2);
1982         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]);
1983         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]);
1984
1985         commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
1986         expect_pending_htlcs_forwardable!(nodes[2]);
1987
1988         let events = nodes[2].node.get_and_clear_pending_events();
1989         assert_eq!(events.len(), 2);
1990         match events[0] {
1991                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
1992                         assert_eq!(our_payment_hash_21, *payment_hash);
1993                         assert_eq!(recv_value_21, amount_msat);
1994                         assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
1995                         assert_eq!(via_channel_id, Some(chan_2.2));
1996                         match &purpose {
1997                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
1998                                         assert!(payment_preimage.is_none());
1999                                         assert_eq!(our_payment_secret_21, *payment_secret);
2000                                 },
2001                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
2002                         }
2003                 },
2004                 _ => panic!("Unexpected event"),
2005         }
2006         match events[1] {
2007                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
2008                         assert_eq!(our_payment_hash_22, *payment_hash);
2009                         assert_eq!(recv_value_22, amount_msat);
2010                         assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
2011                         assert_eq!(via_channel_id, Some(chan_2.2));
2012                         match &purpose {
2013                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
2014                                         assert!(payment_preimage.is_none());
2015                                         assert_eq!(our_payment_secret_22, *payment_secret);
2016                                 },
2017                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
2018                         }
2019                 },
2020                 _ => panic!("Unexpected event"),
2021         }
2022
2023         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
2024         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
2025         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
2026
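        // With the earlier HTLCs settled, their commit tx fee reservation is freed again;
        // recv_value_3 spends exactly the 2-HTLC-to-0-HTLC fee difference, less the routing fee.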
2027         let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, opt_anchors);
2028         let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat;
2029         send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3);
2030
2031         let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors);
2032         let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat);
2033         let stat0 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
2034         assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
2035         assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat + commit_tx_fee_1_htlc);
2036
2037         let stat2 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2);
2038         assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3);
2039 }
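// All of the sends above carry a payment secret via `RecipientOnionFields::secret_only`. For
// the spontaneous (keysend) sends this change covers, the onion carries no secret; a minimal
// sketch of the equivalent call, assuming the `spontaneous_empty` constructor and with `route`
// and `payment_preimage` as placeholders for values a test helper would produce:
//
//     let onion = RecipientOnionFields::spontaneous_empty();
//     nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage), onion,
//             PaymentId(payment_preimage.0)).unwrap();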
2040
2041 #[test]
2042 fn channel_reserve_in_flight_removes() {
2043         // In cases where one side claims an HTLC, it thinks it has additional available funds that it
2044         // can send to its counterparty, but due to update ordering, the other side may not yet have
2045         // considered those HTLCs fully removed.
2046         // This tests that we don't count HTLCs which will not be included in the next remote
2047         // commitment transaction towards the reserve value (as it implies no commitment transaction
2048         // will be generated which violates the remote reserve value).
2049         // This was broken previously, and discovered by the chanmon_fail_consistency fuzz test.
2050         // To test this we:
2051         //  * route two HTLCs from A to B (note that, at a high level, this test is checking that, when
2052         //    you consider the values of both of these HTLCs, B may not send an HTLC back to A, but if
2053         //    you only consider the value of the first HTLC, it may),
2054         //  * start routing a third HTLC from A to B,
2055         //  * claim the first two HTLCs (though B will generate an update_fulfill for one, and put
2056         //    the other claim in its holding cell, as it immediately goes into AwaitingRAA),
2057         //  * deliver the first fulfill from B
2058         //  * deliver the update_add and an RAA from A, resulting in B freeing the second holding cell
2059         //    claim,
2060         //  * deliver A's response CS and RAA.
2061         //    This results in A having the second HTLC in AwaitingRemovedRemoteRevoke, but B having
2062         //    removed it fully. B now has the push_msat plus the first two HTLCs in value.
2063         //  * Now B happily sends another HTLC, potentially violating its reserve value from A's point
2064         //    of view (if A counts the AwaitingRemovedRemoteRevoke HTLC).
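        // (Throughout this test: RAA = revoke_and_ack, CS = commitment_signed.)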
2065         let chanmon_cfgs = create_chanmon_cfgs(2);
2066         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2067         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2068         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2069         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2070
2071         let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
2072         // Route the first two HTLCs.
2073         let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000;
2074         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1);
2075         let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 20_000);
2076
2077         // Start routing the third HTLC (this is just used to get everyone in the right state).
2078         let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
2079         let send_1 = {
2080                 nodes[0].node.send_payment_with_route(&route, payment_hash_3,
2081                         RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
2082                 check_added_monitors!(nodes[0], 1);
2083                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2084                 assert_eq!(events.len(), 1);
2085                 SendEvent::from_event(events.remove(0))
2086         };
2087
2088         // Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an
2089         // initial fulfill/CS.
2090         nodes[1].node.claim_funds(payment_preimage_1);
2091         expect_payment_claimed!(nodes[1], payment_hash_1, payment_value_1);
2092         check_added_monitors!(nodes[1], 1);
2093         let bs_removes = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2094
2095         // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not
2096         // remove the second HTLC when we send the HTLC back from B to A.
2097         nodes[1].node.claim_funds(payment_preimage_2);
2098         expect_payment_claimed!(nodes[1], payment_hash_2, 20_000);
2099         check_added_monitors!(nodes[1], 1);
2100         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2101
2102         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]);
2103         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed);
2104         check_added_monitors!(nodes[0], 1);
2105         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2106         expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
2107
2108         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]);
2109         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg);
2110         check_added_monitors!(nodes[1], 1);
2111         // B is already in AwaitingRAA, so it can't generate a CS here
2112         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2113
2114         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2115         check_added_monitors!(nodes[1], 1);
2116         let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2117
2118         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2119         check_added_monitors!(nodes[0], 1);
2120         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2121
2122         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2123         check_added_monitors!(nodes[1], 1);
2124         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2125
2126         // The second HTLC is removed, but as A is in AwaitingRAA it can't generate a CS here, so the
2127         // RAA that B generated above doesn't fully resolve the second HTLC from A's point of view.
2128         // However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
2129         // can no longer broadcast a commitment transaction with it and B has the preimage so can go
2130         // on-chain as necessary).
2131         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]);
2132         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
2133         check_added_monitors!(nodes[0], 1);
2134         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2135         expect_payment_sent_without_paths!(nodes[0], payment_preimage_2);
2136
2137         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2138         check_added_monitors!(nodes[1], 1);
2139         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2140
2141         expect_pending_htlcs_forwardable!(nodes[1]);
2142         expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 100000);
2143
2144         // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
2145         // resolve the second HTLC from A's point of view.
2146         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2147         check_added_monitors!(nodes[0], 1);
2148         expect_payment_path_successful!(nodes[0]);
2149         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2150
2151         // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
2152         // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing.
2153         let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[1], nodes[0], 10000);
2154         let send_2 = {
2155                 nodes[1].node.send_payment_with_route(&route, payment_hash_4,
2156                         RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap();
2157                 check_added_monitors!(nodes[1], 1);
2158                 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
2159                 assert_eq!(events.len(), 1);
2160                 SendEvent::from_event(events.remove(0))
2161         };
2162
2163         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_2.msgs[0]);
2164         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_2.commitment_msg);
2165         check_added_monitors!(nodes[0], 1);
2166         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2167
2168         // Now just resolve all the outstanding messages/HTLCs for completeness...
2169
2170         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2171         check_added_monitors!(nodes[1], 1);
2172         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2173
2174         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2175         check_added_monitors!(nodes[1], 1);
2176
2177         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2178         check_added_monitors!(nodes[0], 1);
2179         expect_payment_path_successful!(nodes[0]);
2180         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2181
2182         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2183         check_added_monitors!(nodes[1], 1);
2184         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2185
2186         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2187         check_added_monitors!(nodes[0], 1);
2188
2189         expect_pending_htlcs_forwardable!(nodes[0]);
2190         expect_payment_claimable!(nodes[0], payment_hash_4, payment_secret_4, 10000);
2191
2192         claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
2193         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
2194 }
2195
2196 #[test]
2197 fn channel_monitor_network_test() {
2198         // Simple test which builds a network of ChannelManagers, connects them to each other, and
2199         // tests that ChannelMonitor is able to recover from various states.
2200         let chanmon_cfgs = create_chanmon_cfgs(5);
2201         let node_cfgs = create_node_cfgs(5, &chanmon_cfgs);
2202         let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, None, None, None, None]);
2203         let nodes = create_network(5, &node_cfgs, &node_chanmgrs);
2204
2205         // Create some initial channels
2206         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2207         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
2208         let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
2209         let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
2210
2211         // Make sure all nodes are at the same starting height
2212         connect_blocks(&nodes[0], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
2213         connect_blocks(&nodes[1], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
2214         connect_blocks(&nodes[2], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
2215         connect_blocks(&nodes[3], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[3].best_block_info().1);
2216         connect_blocks(&nodes[4], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[4].best_block_info().1);
2217
2218         // Rebalance the network a bit by relaying one payment through all the channels...
2219         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2220         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2221         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2222         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2223
2224         // Simple case with no pending HTLCs:
2225         nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
2226         check_added_monitors!(nodes[1], 1);
2227         check_closed_broadcast!(nodes[1], true);
2228         {
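                // (As used here, test_txn_broadcast with HTLCType::NONE expects only a commitment
                // tx, and no HTLC-Timeout/Success claim, to have been broadcast for the channel.)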
2229                 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
2230                 assert_eq!(node_txn.len(), 1);
2231                 mine_transaction(&nodes[0], &node_txn[0]);
2232                 check_added_monitors!(nodes[0], 1);
2233                 test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE);
2234         }
2235         check_closed_broadcast!(nodes[0], true);
2236         assert_eq!(nodes[0].node.list_channels().len(), 0);
2237         assert_eq!(nodes[1].node.list_channels().len(), 1);
2238         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
2239         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
2240
2241         // One pending HTLC is discarded by the force-close:
2242         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
2243
2244         // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
2245         // broadcasted until we reach the timelock time).
2246         nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
2247         check_closed_broadcast!(nodes[1], true);
2248         check_added_monitors!(nodes[1], 1);
2249         {
2250                 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE);
2251                 connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
2252                 test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
2253                 mine_transaction(&nodes[2], &node_txn[0]);
2254                 check_added_monitors!(nodes[2], 1);
2255                 test_txn_broadcast(&nodes[2], &chan_2, Some(node_txn[0].clone()), HTLCType::NONE);
2256         }
2257         check_closed_broadcast!(nodes[2], true);
2258         assert_eq!(nodes[1].node.list_channels().len(), 0);
2259         assert_eq!(nodes[2].node.list_channels().len(), 1);
2260         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
2261         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
2262
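        // Claims $preimage on $node and asserts the resulting message event is a bare
        // update_fulfill (no adds, no fails) destined for $prev_node.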
2263         macro_rules! claim_funds {
2264                 ($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => {
2265                         {
2266                                 $node.node.claim_funds($preimage);
2267                                 expect_payment_claimed!($node, $payment_hash, 3_000_000);
2268                                 check_added_monitors!($node, 1);
2269
2270                                 let events = $node.node.get_and_clear_pending_msg_events();
2271                                 assert_eq!(events.len(), 1);
2272                                 match events[0] {
2273                                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => {
2274                                                 assert!(update_add_htlcs.is_empty());
2275                                                 assert!(update_fail_htlcs.is_empty());
2276                                                 assert_eq!(*node_id, $prev_node.node.get_our_node_id());
2277                                         },
2278                                         _ => panic!("Unexpected event"),
2279                                 };
2280                         }
2281                 }
2282         }
2283
2284         // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
2285         // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
2286         nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
2287         check_added_monitors!(nodes[2], 1);
2288         check_closed_broadcast!(nodes[2], true);
2289         let node2_commitment_txid;
2290         {
2291                 let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
2292                 connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
2293                 test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
2294                 node2_commitment_txid = node_txn[0].txid();
2295
2296                 // Claim the payment on nodes[3], giving it knowledge of the preimage
2297                 claim_funds!(nodes[3], nodes[2], payment_preimage_1, payment_hash_1);
2298                 mine_transaction(&nodes[3], &node_txn[0]);
2299                 check_added_monitors!(nodes[3], 1);
2300                 check_preimage_claim(&nodes[3], &node_txn);
2301         }
2302         check_closed_broadcast!(nodes[3], true);
2303         assert_eq!(nodes[2].node.list_channels().len(), 0);
2304         assert_eq!(nodes[3].node.list_channels().len(), 1);
2305         check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
2306         check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
2307
2308         // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
2309         // confusing us in the following tests.
2310         let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&OutPoint { txid: chan_3.3.txid(), index: 0 });
2311
2312         // One pending HTLC to time out:
2313         let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[3], &[&nodes[4]], 3_000_000);
2314         // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
2315         // buffer space).
2316
2317         let (close_chan_update_1, close_chan_update_2) = {
2318                 connect_blocks(&nodes[3], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
2319                 let events = nodes[3].node.get_and_clear_pending_msg_events();
2320                 assert_eq!(events.len(), 2);
2321                 let close_chan_update_1 = match events[0] {
2322                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2323                                 msg.clone()
2324                         },
2325                         _ => panic!("Unexpected event"),
2326                 };
2327                 match events[1] {
2328                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id } => {
2329                                 assert_eq!(node_id, nodes[4].node.get_our_node_id());
2330                         },
2331                         _ => panic!("Unexpected event"),
2332                 }
2333                 check_added_monitors!(nodes[3], 1);
2334
2335                 // Clear the bumped claiming txn spending node 2's commitment tx; bumped txn are regenerated once a height-based timer fires.
2336                 {
2337                         let mut node_txn = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap();
2338                         node_txn.retain(|tx| {
2339                                 // Drop any claim spending node 2's commitment tx; keep
2340                                 // everything else.
2341                                 tx.input[0].previous_output.txid != node2_commitment_txid
2342                         });
2343                 }
2344
2345                 let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT);
2346
2347                 // Claim the payment on nodes[4], giving it knowledge of the preimage
2348                 claim_funds!(nodes[4], nodes[3], payment_preimage_2, payment_hash_2);
2349
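                // Connect blocks until the HTLC expiry is within CLTV_CLAIM_BUFFER, at which point
                // nodes[4] must go on-chain to claim with its preimage.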
2350                 connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
2351                 let events = nodes[4].node.get_and_clear_pending_msg_events();
2352                 assert_eq!(events.len(), 2);
2353                 let close_chan_update_2 = match events[0] {
2354                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2355                                 msg.clone()
2356                         },
2357                         _ => panic!("Unexpected event"),
2358                 };
2359                 match events[1] {
2360                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id } => {
2361                                 assert_eq!(node_id, nodes[3].node.get_our_node_id());
2362                         },
2363                         _ => panic!("Unexpected event"),
2364                 }
2365                 check_added_monitors!(nodes[4], 1);
2366                 test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
2367
2368                 mine_transaction(&nodes[4], &node_txn[0]);
2369                 check_preimage_claim(&nodes[4], &node_txn);
2370                 (close_chan_update_1, close_chan_update_2)
2371         };
2372         nodes[3].gossip_sync.handle_channel_update(&close_chan_update_2).unwrap();
2373         nodes[4].gossip_sync.handle_channel_update(&close_chan_update_1).unwrap();
2374         assert_eq!(nodes[3].node.list_channels().len(), 0);
2375         assert_eq!(nodes[4].node.list_channels().len(), 0);
2376
2377         assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
2378                 ChannelMonitorUpdateStatus::Completed);
2379         check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
2380         check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed);
2381 }
2382
2383 #[test]
2384 fn test_justice_tx() {
2385         // Test justice txn built on revoked HTLC-Success tx, against both sides
2386         let mut alice_config = UserConfig::default();
2387         alice_config.channel_handshake_config.announced_channel = true;
2388         alice_config.channel_handshake_limits.force_announced_channel_preference = false;
2389         alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
2390         let mut bob_config = UserConfig::default();
2391         bob_config.channel_handshake_config.announced_channel = true;
2392         bob_config.channel_handshake_limits.force_announced_channel_preference = false;
2393         bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
2394         let user_cfgs = [Some(alice_config), Some(bob_config)];
2395         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2396         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2397         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
2398         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2399         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
2400         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2401         *nodes[0].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen;
2402         // Create some new channels:
2403         let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1);
2404
2405         // A pending HTLC which will be revoked:
2406         let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2407         // Get the will-be-revoked local txn from nodes[0]
2408         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_5.2);
2409         assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
2410         assert_eq!(revoked_local_txn[0].input.len(), 1);
2411         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
2412         assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to 0 are present
2413         assert_eq!(revoked_local_txn[1].input.len(), 1);
2414         assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2415         assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
2416         // Revoke the old state
2417         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
2418
2419         {
2420                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2421                 {
2422                         let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
2423                         assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2424                         assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output
2425
2426                         check_spends!(node_txn[0], revoked_local_txn[0]);
2427                         node_txn.swap_remove(0);
2428                 }
2429                 check_added_monitors!(nodes[1], 1);
2430                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
2431                 test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
2432
2433                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2434                 connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
2435                 // Verify broadcast of revoked HTLC-timeout
2436                 let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
2437                 check_added_monitors!(nodes[0], 1);
2438                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
2439                 // Broadcast revoked HTLC-timeout on node 1
2440                 mine_transaction(&nodes[1], &node_txn[1]);
2441                 test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
2442         }
2443         get_announce_close_broadcast_events(&nodes, 0, 1);
2444
2445         assert_eq!(nodes[0].node.list_channels().len(), 0);
2446         assert_eq!(nodes[1].node.list_channels().len(), 0);
2447
2448         // We test the justice tx built by A on B's revoked HTLC-Success tx
2449         // Create some new channels:
2450         let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1);
2451         {
2452                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
2453                 node_txn.clear();
2454         }
2455
2456         // A pending HTLC which will be revoked:
2457         let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2458         // Get the will-be-revoked local txn from B
2459         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_6.2);
2460         assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx
2461         assert_eq!(revoked_local_txn[0].input.len(), 1);
2462         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.txid());
2463         assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to A are present
2464         // Revoke the old state
2465         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4);
2466         {
2467                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2468                 {
2469                         let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
2470                         assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2471                         assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output
2472
2473                         check_spends!(node_txn[0], revoked_local_txn[0]);
2474                         node_txn.swap_remove(0);
2475                 }
2476                 check_added_monitors!(nodes[0], 1);
2477                 test_txn_broadcast(&nodes[0], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
2478
2479                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2480                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
2481                 let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
2482                 check_added_monitors!(nodes[1], 1);
2483                 mine_transaction(&nodes[0], &node_txn[1]);
2484                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
2485                 test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
2486         }
2487         get_announce_close_broadcast_events(&nodes, 0, 1);
2488         assert_eq!(nodes[0].node.list_channels().len(), 0);
2489         assert_eq!(nodes[1].node.list_channels().len(), 0);
2490 }
2491
2492 #[test]
2493 fn revoked_output_claim() {
2494         // Simple test to ensure a node will claim a revoked output when a stale remote commitment
2495         // transaction is broadcast by its counterparty
2496         let chanmon_cfgs = create_chanmon_cfgs(2);
2497         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2498         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2499         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2500         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2501         // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim the revoked output
2502         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2503         assert_eq!(revoked_local_txn.len(), 1);
2504         // Only output is the full channel value back to nodes[0]:
2505         assert_eq!(revoked_local_txn[0].output.len(), 1);
2506         // Send a payment through, updating everyone's latest commitment txn
2507         send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);
2508
2509         // Inform nodes[1] that nodes[0] broadcast a stale tx
2510         mine_transaction(&nodes[1], &revoked_local_txn[0]);
2511         check_added_monitors!(nodes[1], 1);
2512         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
2513         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
2514         assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output
2515
2516         check_spends!(node_txn[0], revoked_local_txn[0]);
2517
2518         // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
2519         mine_transaction(&nodes[0], &revoked_local_txn[0]);
2520         get_announce_close_broadcast_events(&nodes, 0, 1);
2521         check_added_monitors!(nodes[0], 1);
2522         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
2523 }
2524
2525 #[test]
2526 fn claim_htlc_outputs_shared_tx() {
2527         // A node revoked its old state; the HTLCs haven't timed out yet, so claim them in a shared justice tx
2528         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2529         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2530         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2531         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2532         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2533
2534         // Create some new channel:
2535         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2536
2537         // Rebalance the network to generate an HTLC in each direction
2538         send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
2539         // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both offered and received HTLC outputs on top of the commitment tx
2540         let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
2541         let (_payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
2542
2543         // Get the will-be-revoked local txn from node[0]
2544         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2545         assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
2546         assert_eq!(revoked_local_txn[0].input.len(), 1);
2547         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
2548         assert_eq!(revoked_local_txn[1].input.len(), 1);
2549         assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2550         assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
2551         check_spends!(revoked_local_txn[1], revoked_local_txn[0]);
2552
2553         // Revoke the old state
2554         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
2555
2556         {
2557                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2558                 check_added_monitors!(nodes[0], 1);
2559                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
2560                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2561                 check_added_monitors!(nodes[1], 1);
2562                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
2563                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2564                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
2565
2566                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
2567                 assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2568
2569                 assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
2570                 check_spends!(node_txn[0], revoked_local_txn[0]);
2571
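                // The three inputs have distinct witness script lengths, so collecting them in a
                // BTreeSet yields a deterministic ascending order to assert on, regardless of how
                // the inputs were ordered in the penalty tx.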
2572                 let mut witness_lens = BTreeSet::new();
2573                 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
2574                 witness_lens.insert(node_txn[0].input[1].witness.last().unwrap().len());
2575                 witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len());
2576                 assert_eq!(witness_lens.len(), 3);
2577                 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2578                 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2579                 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2580
2581                 // Finally, mine the penalty transaction and check that we get an HTLC failure after
2582                 // ANTI_REORG_DELAY confirmations.
2583                 mine_transaction(&nodes[1], &node_txn[0]);
2584                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2585                 expect_payment_failed!(nodes[1], payment_hash_2, false);
2586         }
2587         get_announce_close_broadcast_events(&nodes, 0, 1);
2588         assert_eq!(nodes[0].node.list_channels().len(), 0);
2589         assert_eq!(nodes[1].node.list_channels().len(), 0);
2590 }
2591
2592 #[test]
2593 fn claim_htlc_outputs_single_tx() {
2594         // A node revoked its old state; the HTLCs have timed out, so claim each of them in a separate justice tx
2595         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2596         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2597         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2598         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2599         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2600
2601         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2602
2603         // Rebalance the network to generate an HTLC in each direction
2604         send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
2605         // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both offered and received HTLC outputs on top of the commitment tx, but this
2606         // time in separate claim transactions, as we're going to time out the HTLCs at a high current height
2607         let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
2608         let (_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
2609
2610         // Get the will-be-revoked local txn from node[0]
2611         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2612
2613         // Revoke the old state
2614         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
2615
2616         {
2617                 confirm_transaction_at(&nodes[0], &revoked_local_txn[0], 100);
2618                 check_added_monitors!(nodes[0], 1);
2619                 confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
2620                 check_added_monitors!(nodes[1], 1);
2621                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
2622                 let mut events = nodes[0].node.get_and_clear_pending_events();
2623                 expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
2624                 match events.last().unwrap() {
2625                         Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
2626                         _ => panic!("Unexpected event"),
2627                 }
2628
2629                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2630                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
2631
2632                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
2633                 assert_eq!(node_txn.len(), 7);
2634
2635                 // Check the pair of local commitment and HTLC-timeout txn broadcast due to HTLC expiration
2636                 assert_eq!(node_txn[0].input.len(), 1);
2637                 check_spends!(node_txn[0], chan_1.3);
2638                 assert_eq!(node_txn[1].input.len(), 1);
2639                 let witness_script = node_txn[1].input[0].witness.last().unwrap();
2640                 assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); // Spending an offered HTLC output
2641                 check_spends!(node_txn[1], node_txn[0]);
2642
2643                 // Justice transactions are indices 2-3-4
2644                 assert_eq!(node_txn[2].input.len(), 1);
2645                 assert_eq!(node_txn[3].input.len(), 1);
2646                 assert_eq!(node_txn[4].input.len(), 1);
2647
2648                 check_spends!(node_txn[2], revoked_local_txn[0]);
2649                 check_spends!(node_txn[3], revoked_local_txn[0]);
2650                 check_spends!(node_txn[4], revoked_local_txn[0]);
2651
2652                 let mut witness_lens = BTreeSet::new();
2653                 witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
2654                 witness_lens.insert(node_txn[3].input[0].witness.last().unwrap().len());
2655                 witness_lens.insert(node_txn[4].input[0].witness.last().unwrap().len());
2656                 assert_eq!(witness_lens.len(), 3);
2657                 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2658                 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2659                 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2660
2661                 // Finally, mine the penalty transactions and check that we get an HTLC failure after
2662                 // ANTI_REORG_DELAY confirmations.
2663                 mine_transaction(&nodes[1], &node_txn[2]);
2664                 mine_transaction(&nodes[1], &node_txn[3]);
2665                 mine_transaction(&nodes[1], &node_txn[4]);
2666                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2667                 expect_payment_failed!(nodes[1], payment_hash_2, false);
2668         }
2669         get_announce_close_broadcast_events(&nodes, 0, 1);
2670         assert_eq!(nodes[0].node.list_channels().len(), 0);
2671         assert_eq!(nodes[1].node.list_channels().len(), 0);
2672 }
2673
2674 #[test]
2675 fn test_htlc_on_chain_success() {
2676         // Test that in case of a unilateral close onchain, we detect the state of output and pass
2677         // the preimage backward accordingly. So here we test that ChannelManager is
2678         // broadcasting the right event to other nodes in payment path.
2679         // We test with two HTLCs simultaneously as that was not handled correctly in the past.
2680         // A --------------------> B ----------------------> C (preimage)
2681         // First, C should claim the HTLC outputs via HTLC-Success when its own latest local
2682         // commitment transaction was broadcast.
2683         // Then, B should learn the preimage from said transactions, attempting to claim backwards
2684         // towards A.
2685         // B should be able to claim via preimage if A then broadcasts its local tx.
2686         // Finally, when A sees B's latest local commitment transaction it should be able to claim
2687         // the HTLC outputs via the preimage it learned (which, once confirmed should generate a
2688         // PaymentSent event).
2689
2690         let chanmon_cfgs = create_chanmon_cfgs(3);
2691         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2692         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2693         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2694
2695         // Create some initial channels
2696         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2697         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
2698
2699         // Ensure all nodes are at the same height
2700         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
2701         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
2702         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
2703         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
2704
2705         // Rebalance the network a bit by relaying one payment through all the channels...
2706         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8_000_000);
2707         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8_000_000);
2708
2709         let (our_payment_preimage, payment_hash_1, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
2710         let (our_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
2711
2712         // Broadcast legit commitment tx from C on B's chain
2713         // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain
2714         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
2715         assert_eq!(commitment_tx.len(), 1);
2716         check_spends!(commitment_tx[0], chan_2.3);
2717         nodes[2].node.claim_funds(our_payment_preimage);
2718         expect_payment_claimed!(nodes[2], payment_hash_1, 3_000_000);
2719         nodes[2].node.claim_funds(our_payment_preimage_2);
2720         expect_payment_claimed!(nodes[2], payment_hash_2, 3_000_000);
2721         check_added_monitors!(nodes[2], 2);
2722         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2723         assert!(updates.update_add_htlcs.is_empty());
2724         assert!(updates.update_fail_htlcs.is_empty());
2725         assert!(updates.update_fail_malformed_htlcs.is_empty());
2726         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
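             // Only one fulfill appears in this update: the second claim sits in nodes[2]'s
             // holding cell until nodes[1] revokes the current commitment state.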
2727
2728         mine_transaction(&nodes[2], &commitment_tx[0]);
2729         check_closed_broadcast!(nodes[2], true);
2730         check_added_monitors!(nodes[2], 1);
2731         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
2732         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 2 (2 * HTLC-Success tx)
2733         assert_eq!(node_txn.len(), 2);
2734         check_spends!(node_txn[0], commitment_tx[0]);
2735         check_spends!(node_txn[1], commitment_tx[0]);
2736         assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2737         assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2738         assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2739         assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2740         assert_eq!(node_txn[0].lock_time.0, 0);
2741         assert_eq!(node_txn[1].lock_time.0, 0);
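             // HTLC-Success claims carry no CLTV lock (lock_time of 0), in contrast to
             // HTLC-Timeout claims, which are locked until the HTLC's expiry.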
2742
2743         // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
2744         let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
2745         connect_block(&nodes[1], &Block { header, txdata: vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()]});
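             // (Equivalent to mine_transaction, but confirms the commitment transaction and
             // both HTLC-Success claims in a single block.)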
2746         connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
2747         {
2748                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2749                 assert_eq!(added_monitors.len(), 1);
2750                 assert_eq!(added_monitors[0].0.txid, chan_2.3.txid());
2751                 added_monitors.clear();
2752         }
2753         let forwarded_events = nodes[1].node.get_and_clear_pending_events();
2754         assert_eq!(forwarded_events.len(), 3);
2755         match forwarded_events[0] {
2756                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
2757                 _ => panic!("Unexpected event"),
2758         }
2759         let chan_id = Some(chan_1.2);
2760         match forwarded_events[1] {
2761                 Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
2762                         assert_eq!(fee_earned_msat, Some(1000));
2763                         assert_eq!(prev_channel_id, chan_id);
2764                         assert_eq!(claim_from_onchain_tx, true);
2765                         assert_eq!(next_channel_id, Some(chan_2.2));
2766                         assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
2767                 },
2768                 _ => panic!()
2769         }
2770         match forwarded_events[2] {
2771                 Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
2772                         assert_eq!(fee_earned_msat, Some(1000));
2773                         assert_eq!(prev_channel_id, chan_id);
2774                         assert_eq!(claim_from_onchain_tx, true);
2775                         assert_eq!(next_channel_id, Some(chan_2.2));
2776                         assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
2777                 },
2778                 _ => panic!()
2779         }
2780         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
2781         {
2782                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2783                 assert_eq!(added_monitors.len(), 2);
2784                 assert_eq!(added_monitors[0].0.txid, chan_1.3.txid());
2785                 assert_eq!(added_monitors[1].0.txid, chan_1.3.txid());
2786                 added_monitors.clear();
2787         }
2788         assert_eq!(events.len(), 3);
2789
2790         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
2791         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
2792
2793         match nodes_2_event {
2794                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
2795                 _ => panic!("Unexpected event"),
2796         }
2797
2798         match nodes_0_event {
2799                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
2800                         assert!(update_add_htlcs.is_empty());
2801                         assert!(update_fail_htlcs.is_empty());
2802                         assert_eq!(update_fulfill_htlcs.len(), 1);
2803                         assert!(update_fail_malformed_htlcs.is_empty());
2804                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
2805                 },
2806                 _ => panic!("Unexpected event"),
2807         };
2808
2809         // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
2810         match events[0] {
2811                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
2812                 _ => panic!("Unexpected event"),
2813         }
2814
2815         macro_rules! check_tx_local_broadcast {
2816                 ($node: expr, $htlc_offered: expr, $commitment_tx: expr) => { {
2817                         let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap();
2818                         assert_eq!(node_txn.len(), 2);
2819                         // Node[1]: 2 * HTLC-timeout tx
2820                         // Node[0]: 2 * HTLC-timeout tx
2821                         check_spends!(node_txn[0], $commitment_tx);
2822                         check_spends!(node_txn[1], $commitment_tx);
2823                         assert_ne!(node_txn[0].lock_time.0, 0);
2824                         assert_ne!(node_txn[1].lock_time.0, 0);
2825                         if $htlc_offered {
2826                                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2827                                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2828                                 assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2829                                 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2830                         } else {
2831                                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2832                                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2833                                 assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
2834                                 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
2835                         }
2836                         node_txn.clear();
2837                 } }
2838         }
2839         // nodes[1] now broadcasts its own timeout-claim of the output that nodes[2] just claimed via success.
2840         check_tx_local_broadcast!(nodes[1], false, commitment_tx[0]);
2841
2842         // Broadcast legit commitment tx from A on B's chain
2843         // Broadcast preimage tx by B on the offered output from A's commitment tx, on A's chain
2844         let node_a_commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
2845         check_spends!(node_a_commitment_tx[0], chan_1.3);
2846         mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
2847         check_closed_broadcast!(nodes[1], true);
2848         check_added_monitors!(nodes[1], 1);
2849         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
2850         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
2851         assert!(node_txn.len() == 1 || node_txn.len() == 3); // HTLC-Success, 2* RBF bumps of above HTLC txn
2852         let commitment_spend =
2853                 if node_txn.len() == 1 {
2854                         &node_txn[0]
2855                 } else {
2856                         // Certain `ConnectStyle`s (e.g. FullBlockViaListen) will cause RBF bumps of the
2857                         // previous HTLC transaction to be broadcast.
2858                         if node_txn[0].input[0].previous_output.txid == node_a_commitment_tx[0].txid() {
2859                                 check_spends!(node_txn[1], commitment_tx[0]);
2860                                 check_spends!(node_txn[2], commitment_tx[0]);
2861                                 assert_ne!(node_txn[1].input[0].previous_output.vout, node_txn[2].input[0].previous_output.vout);
2862                                 &node_txn[0]
2863                         } else {
2864                                 check_spends!(node_txn[0], commitment_tx[0]);
2865                                 check_spends!(node_txn[1], commitment_tx[0]);
2866                                 assert_ne!(node_txn[0].input[0].previous_output.vout, node_txn[1].input[0].previous_output.vout);
2867                                 &node_txn[2]
2868                         }
2869                 };
2870
2871         check_spends!(commitment_spend, node_a_commitment_tx[0]);
2872         assert_eq!(commitment_spend.input.len(), 2);
2873         assert_eq!(commitment_spend.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2874         assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2875         assert_eq!(commitment_spend.lock_time.0, nodes[1].best_block_info().1 + 1);
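             // Note the sweep's nLockTime is pinned just above the current tip, a common
             // anti-fee-sniping pattern for claims that are otherwise immediately valid.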
2876         assert!(commitment_spend.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
2877         // We don't bother to check that B can claim the HTLC output on its commitment tx here as
2878         // we already checked the same situation with A.
2879
2880         // Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
2881         let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
2882         connect_block(&nodes[0], &Block { header, txdata: vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()] });
2883         connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires
2884         check_closed_broadcast!(nodes[0], true);
2885         check_added_monitors!(nodes[0], 1);
2886         let events = nodes[0].node.get_and_clear_pending_events();
2887         assert_eq!(events.len(), 5);
2888         let mut first_claimed = false;
2889         for event in events {
2890                 match event {
2891                         Event::PaymentSent { payment_preimage, payment_hash, .. } => {
2892                                 if payment_preimage == our_payment_preimage && payment_hash == payment_hash_1 {
2893                                         assert!(!first_claimed);
2894                                         first_claimed = true;
2895                                 } else {
2896                                         assert_eq!(payment_preimage, our_payment_preimage_2);
2897                                         assert_eq!(payment_hash, payment_hash_2);
2898                                 }
2899                         },
2900                         Event::PaymentPathSuccessful { .. } => {},
2901                         Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
2902                         _ => panic!("Unexpected event"),
2903                 }
2904         }
2905         check_tx_local_broadcast!(nodes[0], true, node_a_commitment_tx[0]);
2906 }
2907
2908 fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
2909         // Test that in case of a unilateral close onchain, we detect the state of the output and
2910         // time out the HTLC backward accordingly. So here we test that ChannelManager is
2911         // broadcasting the right event to the other nodes in the payment path.
2912         // A ------------------> B ----------------------> C (timeout)
2913         //    B's commitment tx                 C's commitment tx
2914         //            \                                  \
2915         //         B's HTLC timeout tx               B's timeout tx
2916
2917         let chanmon_cfgs = create_chanmon_cfgs(3);
2918         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2919         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2920         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2921         *nodes[0].connect_style.borrow_mut() = connect_style;
2922         *nodes[1].connect_style.borrow_mut() = connect_style;
2923         *nodes[2].connect_style.borrow_mut() = connect_style;
2924
2925         // Create some initial channels
2926         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2927         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
2928
2929         // Rebalance the network a bit by relaying one payment through all the channels...
2930         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8_000_000);
2931         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8_000_000);
2932
2933         let (_payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
2934
2935         // Broadcast legit commitment tx from C on B's chain
2936         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
2937         check_spends!(commitment_tx[0], chan_2.3);
2938         nodes[2].node.fail_htlc_backwards(&payment_hash);
2939         check_added_monitors!(nodes[2], 0);
2940         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
2941         check_added_monitors!(nodes[2], 1);
2942
2943         let events = nodes[2].node.get_and_clear_pending_msg_events();
2944         assert_eq!(events.len(), 1);
2945         match events[0] {
2946                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
2947                         assert!(update_add_htlcs.is_empty());
2948                         assert!(!update_fail_htlcs.is_empty());
2949                         assert!(update_fulfill_htlcs.is_empty());
2950                         assert!(update_fail_malformed_htlcs.is_empty());
2951                         assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
2952                 },
2953                 _ => panic!("Unexpected event"),
2954         };
2955         mine_transaction(&nodes[2], &commitment_tx[0]);
2956         check_closed_broadcast!(nodes[2], true);
2957         check_added_monitors!(nodes[2], 1);
2958         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
2959         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
2960         assert_eq!(node_txn.len(), 0);
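             // nodes[2] has nothing to claim on-chain: it failed the HTLC backwards, and the
             // timeout path on its received-HTLC output belongs to nodes[1].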
2961
2962         // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
2963         // Verify that B's ChannelManager is able to detect that the HTLC timed out via its own tx and reacts backward accordingly
2964         connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
2965         mine_transaction(&nodes[1], &commitment_tx[0]);
2966         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
2967         let timeout_tx;
2968         {
2969                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
2970                 assert_eq!(node_txn.len(), 3); // 2 (local commitment tx + HTLC-timeout), 1 timeout tx
2971
2972                 check_spends!(node_txn[2], commitment_tx[0]);
2973                 assert_eq!(node_txn[2].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2974
2975                 check_spends!(node_txn[0], chan_2.3);
2976                 check_spends!(node_txn[1], node_txn[0]);
2977                 assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), 71);
2978                 assert_eq!(node_txn[1].clone().input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2979
2980                 timeout_tx = node_txn[2].clone();
2981                 node_txn.clear();
2982         }
2983
2984         mine_transaction(&nodes[1], &timeout_tx);
2985         check_added_monitors!(nodes[1], 1);
2986         check_closed_broadcast!(nodes[1], true);
2987
2988         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2989
2990         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
2991         check_added_monitors!(nodes[1], 1);
2992         let events = nodes[1].node.get_and_clear_pending_msg_events();
2993         assert_eq!(events.len(), 1);
2994         match events[0] {
2995                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
2996                         assert!(update_add_htlcs.is_empty());
2997                         assert!(!update_fail_htlcs.is_empty());
2998                         assert!(update_fulfill_htlcs.is_empty());
2999                         assert!(update_fail_malformed_htlcs.is_empty());
3000                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3001                 },
3002                 _ => panic!("Unexpected event"),
3003         };
3004
3005         // Broadcast legit commitment tx from B on A's chain
3006         let commitment_tx = get_local_commitment_txn!(nodes[1], chan_1.2);
3007         check_spends!(commitment_tx[0], chan_1.3);
3008
3009         mine_transaction(&nodes[0], &commitment_tx[0]);
3010         connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires
3011
3012         check_closed_broadcast!(nodes[0], true);
3013         check_added_monitors!(nodes[0], 1);
3014         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
3015         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx
3016         assert_eq!(node_txn.len(), 1);
3017         check_spends!(node_txn[0], commitment_tx[0]);
3018         assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
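             // The claim spends B's received-HTLC output via the timeout path, hence the
             // accepted-HTLC witness script.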
3019 }
3020
3021 #[test]
3022 fn test_htlc_on_chain_timeout() {
3023         do_test_htlc_on_chain_timeout(ConnectStyle::BestBlockFirstSkippingBlocks);
3024         do_test_htlc_on_chain_timeout(ConnectStyle::TransactionsFirstSkippingBlocks);
3025         do_test_htlc_on_chain_timeout(ConnectStyle::FullBlockViaListen);
3026 }
3027
3028 #[test]
3029 fn test_simple_commitment_revoked_fail_backward() {
3030         // Test that in case of a revoked commitment tx, we detect the resolution of output by justice tx
3031         // and fail backward accordingly.
3032
3033         let chanmon_cfgs = create_chanmon_cfgs(3);
3034         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3035         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3036         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3037
3038         // Create some initial channels
3039         create_announced_chan_between_nodes(&nodes, 0, 1);
3040         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3041
3042         let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3043         // Get the will-be-revoked local txn from nodes[2]
3044         let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3045         // Revoke the old state
3046         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3047
3048         let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3049
3050         mine_transaction(&nodes[1], &revoked_local_txn[0]);
3051         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
3052         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3053         check_added_monitors!(nodes[1], 1);
3054         check_closed_broadcast!(nodes[1], true);
3055
3056         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
3057         check_added_monitors!(nodes[1], 1);
3058         let events = nodes[1].node.get_and_clear_pending_msg_events();
3059         assert_eq!(events.len(), 1);
3060         match events[0] {
3061                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3062                         assert!(update_add_htlcs.is_empty());
3063                         assert_eq!(update_fail_htlcs.len(), 1);
3064                         assert!(update_fulfill_htlcs.is_empty());
3065                         assert!(update_fail_malformed_htlcs.is_empty());
3066                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3067
3068                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3069                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3070                         expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_2.0.contents.short_channel_id, true);
3071                 },
3072                 _ => panic!("Unexpected event"),
3073         }
3074 }
3075
3076 fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use_dust: bool, no_to_remote: bool) {
3077         // Test that if our counterparty broadcasts a revoked commitment transaction we fail all
3078         // pending HTLCs on that channel backwards even if the HTLCs aren't present in our latest
3079         // commitment transaction anymore.
3080         // To do this, we have the peer which will broadcast a revoked commitment transaction send
3081         // a number of update_fail/commitment_signed updates without ever sending the RAA in
3082         // response to our commitment_signed. This is somewhat misbehavior-y, though not
3083         // technically disallowed and we should probably handle it reasonably.
3084         // Note that this is pretty exhaustive as an outbound HTLC which we haven't yet
3085         // failed/fulfilled backwards must be in at least one of the latest two remote commitment
3086         // transactions:
3087         // * Once we move it out of our holding cell/add it, we will immediately include it in a
3088         //   commitment_signed (implying it will be in the latest remote commitment transaction).
3089         // * Once they remove it, we will send a (the first) commitment_signed without the HTLC,
3090         //   and once they revoke the previous commitment transaction (allowing us to send a new
3091         //   commitment_signed) we will be free to fail/fulfill the HTLC backwards.
3092         let chanmon_cfgs = create_chanmon_cfgs(3);
3093         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3094         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3095         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3096
3097         // Create some initial channels
3098         create_announced_chan_between_nodes(&nodes, 0, 1);
3099         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3100
3101         let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
3102         // Get the will-be-revoked local txn from nodes[2]
3103         let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3104         assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 });
3105         // Revoke the old state
3106         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3107
3108         let value = if use_dust {
3109                 // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
3110                 // well, so HTLCs at exactly the dust limit will not be included in commitment txn.
3111                 nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id())
3112                         .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().holder_dust_limit_satoshis * 1000
3113         } else { 3000000 };
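             // (Assuming LDK's default dust limit of 354 sats, this yields 354_000 msat HTLCs,
             // small enough to be trimmed from the commitment transaction.)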
3114
3115         let (_, first_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3116         let (_, second_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3117         let (_, third_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3118
3119         nodes[2].node.fail_htlc_backwards(&first_payment_hash);
3120         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
3121         check_added_monitors!(nodes[2], 1);
3122         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3123         assert!(updates.update_add_htlcs.is_empty());
3124         assert!(updates.update_fulfill_htlcs.is_empty());
3125         assert!(updates.update_fail_malformed_htlcs.is_empty());
3126         assert_eq!(updates.update_fail_htlcs.len(), 1);
3127         assert!(updates.update_fee.is_none());
3128         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3129         let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
3130         // Drop the last RAA from nodes[2] -> nodes[1]; it is only delivered later if deliver_bs_raa is set
3131
3132         nodes[2].node.fail_htlc_backwards(&second_payment_hash);
3133         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: second_payment_hash }]);
3134         check_added_monitors!(nodes[2], 1);
3135         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3136         assert!(updates.update_add_htlcs.is_empty());
3137         assert!(updates.update_fulfill_htlcs.is_empty());
3138         assert!(updates.update_fail_malformed_htlcs.is_empty());
3139         assert_eq!(updates.update_fail_htlcs.len(), 1);
3140         assert!(updates.update_fee.is_none());
3141         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3142         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3143         check_added_monitors!(nodes[1], 1);
3144         // Note that nodes[1] is in AwaitingRAA, so won't send a CS
3145         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3146         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3147         check_added_monitors!(nodes[2], 1);
3148
3149         nodes[2].node.fail_htlc_backwards(&third_payment_hash);
3150         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: third_payment_hash }]);
3151         check_added_monitors!(nodes[2], 1);
3152         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3153         assert!(updates.update_add_htlcs.is_empty());
3154         assert!(updates.update_fulfill_htlcs.is_empty());
3155         assert!(updates.update_fail_malformed_htlcs.is_empty());
3156         assert_eq!(updates.update_fail_htlcs.len(), 1);
3157         assert!(updates.update_fee.is_none());
3158         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3159         // At this point first_payment_hash has dropped out of the latest two commitment
3160         // transactions that nodes[1] is tracking...
3161         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3162         check_added_monitors!(nodes[1], 1);
3163         // Note that nodes[1] is (still) in AwaitingRAA, so won't send a CS
3164         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3165         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3166         check_added_monitors!(nodes[2], 1);
3167
3168         // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting
3169         // on nodes[2]'s RAA.
3170         let (route, fourth_payment_hash, _, fourth_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 1000000);
3171         nodes[1].node.send_payment_with_route(&route, fourth_payment_hash,
3172                 RecipientOnionFields::secret_only(fourth_payment_secret), PaymentId(fourth_payment_hash.0)).unwrap();
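             // As of this change, spontaneous (keysend) sends take the same onion-fields
             // argument. A minimal sketch of that call shape, with illustrative names (not
             // exercised by this test):
             // node.send_spontaneous_payment(&route, Some(payment_preimage),
             //         RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap();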
3173         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3174         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3175         check_added_monitors!(nodes[1], 0);
3176
3177         if deliver_bs_raa {
3178                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_raa);
3179                 // One monitor for the new revocation preimage, no second one as we won't generate a new
3180                 // commitment transaction for nodes[0] until process_pending_htlc_forwards().
3181                 check_added_monitors!(nodes[1], 1);
3182                 let events = nodes[1].node.get_and_clear_pending_events();
3183                 assert_eq!(events.len(), 2);
3184                 match events[0] {
3185                         Event::PendingHTLCsForwardable { .. } => { },
3186                         _ => panic!("Unexpected event"),
3187                 };
3188                 match events[1] {
3189                         Event::HTLCHandlingFailed { .. } => { },
3190                         _ => panic!("Unexpected event"),
3191                 }
3192                 // Deliberately don't process the pending fail-back so they all fail back at once after
3193                 // block connection just like the !deliver_bs_raa case
3194         }
3195
3196         let mut failed_htlcs = HashSet::new();
3197         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3198
3199         mine_transaction(&nodes[1], &revoked_local_txn[0]);
3200         check_added_monitors!(nodes[1], 1);
3201         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3202
3203         let events = nodes[1].node.get_and_clear_pending_events();
3204         assert_eq!(events.len(), if deliver_bs_raa { 3 + nodes.len() - 1 } else { 4 + nodes.len() });
3205         match events[0] {
3206                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
3207                 _ => panic!("Unexepected event"),
3208         }
3209         match events[1] {
3210                 Event::PaymentPathFailed { ref payment_hash, .. } => {
3211                         assert_eq!(*payment_hash, fourth_payment_hash);
3212                 },
3213                 _ => panic!("Unexpected event"),
3214         }
3215         match events[2] {
3216                 Event::PaymentFailed { ref payment_hash, .. } => {
3217                         assert_eq!(*payment_hash, fourth_payment_hash);
3218                 },
3219                 _ => panic!("Unexpected event"),
3220         }
3221
3222         nodes[1].node.process_pending_htlc_forwards();
3223         check_added_monitors!(nodes[1], 1);
3224
3225         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
3226         assert_eq!(events.len(), if deliver_bs_raa { 4 } else { 3 });
3227
3228         if deliver_bs_raa {
3229                 let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
3230                 match nodes_2_event {
3231                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3232                                 assert_eq!(nodes[2].node.get_our_node_id(), *node_id);
3233                                 assert_eq!(update_add_htlcs.len(), 1);
3234                                 assert!(update_fulfill_htlcs.is_empty());
3235                                 assert!(update_fail_htlcs.is_empty());
3236                                 assert!(update_fail_malformed_htlcs.is_empty());
3237                         },
3238                         _ => panic!("Unexpected event"),
3239                 }
3240         }
3241
3242         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
3243         match nodes_2_event {
3244                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, ref data } }, node_id: _ } => {
3245                         assert_eq!(channel_id, chan_2.2);
3246                         assert_eq!(data.as_str(), "Channel closed because commitment or closing transaction was confirmed on chain.");
3247                 },
3248                 _ => panic!("Unexpected event"),
3249         }
3250
3251         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
3252         match nodes_0_event {
3253                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3254                         assert!(update_add_htlcs.is_empty());
3255                         assert_eq!(update_fail_htlcs.len(), 3);
3256                         assert!(update_fulfill_htlcs.is_empty());
3257                         assert!(update_fail_malformed_htlcs.is_empty());
3258                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3259
3260                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3261                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[1]);
3262                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[2]);
3263
3264                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3265
3266                         let events = nodes[0].node.get_and_clear_pending_events();
3267                         assert_eq!(events.len(), 6);
3268                         match events[0] {
3269                                 Event::PaymentPathFailed { ref payment_hash, ref failure, .. } => {
3270                                         assert!(failed_htlcs.insert(payment_hash.0));
3271                                         // If we delivered B's RAA we got an unknown preimage error, not something
3272                                         // that we should update our routing table for.
3273                                         if !deliver_bs_raa {
3274                                                 if let PathFailure::OnPath { network_update: Some(_) } = failure { } else { panic!("Unexpected path failure") }
3275                                         }
3276                                 },
3277                                 _ => panic!("Unexpected event"),
3278                         }
3279                         match events[1] {
3280                                 Event::PaymentFailed { ref payment_hash, .. } => {
3281                                         assert_eq!(*payment_hash, first_payment_hash);
3282                                 },
3283                                 _ => panic!("Unexpected event"),
3284                         }
3285                         match events[2] {
3286                                 Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
3287                                         assert!(failed_htlcs.insert(payment_hash.0));
3288                                 },
3289                                 _ => panic!("Unexpected event"),
3290                         }
3291                         match events[3] {
3292                                 Event::PaymentFailed { ref payment_hash, .. } => {
3293                                         assert_eq!(*payment_hash, second_payment_hash);
3294                                 },
3295                                 _ => panic!("Unexpected event"),
3296                         }
3297                         match events[4] {
3298                                 Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
3299                                         assert!(failed_htlcs.insert(payment_hash.0));
3300                                 },
3301                                 _ => panic!("Unexpected event"),
3302                         }
3303                         match events[5] {
3304                                 Event::PaymentFailed { ref payment_hash, .. } => {
3305                                         assert_eq!(*payment_hash, third_payment_hash);
3306                                 },
3307                                 _ => panic!("Unexpected event"),
3308                         }
3309                 },
3310                 _ => panic!("Unexpected event"),
3311         }
3312
3313         // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
3314         match events[0] {
3315                 MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
3316                 _ => panic!("Unexpected event"),
3317         }
3318
3319         assert!(failed_htlcs.contains(&first_payment_hash.0));
3320         assert!(failed_htlcs.contains(&second_payment_hash.0));
3321         assert!(failed_htlcs.contains(&third_payment_hash.0));
3322 }
3323
3324 #[test]
3325 fn test_commitment_revoked_fail_backward_exhaustive_a() {
3326         do_test_commitment_revoked_fail_backward_exhaustive(false, true, false);
3327         do_test_commitment_revoked_fail_backward_exhaustive(true, true, false);
3328         do_test_commitment_revoked_fail_backward_exhaustive(false, false, false);
3329         do_test_commitment_revoked_fail_backward_exhaustive(true, false, false);
3330 }
3331
3332 #[test]
3333 fn test_commitment_revoked_fail_backward_exhaustive_b() {
3334         do_test_commitment_revoked_fail_backward_exhaustive(false, true, true);
3335         do_test_commitment_revoked_fail_backward_exhaustive(true, true, true);
3336         do_test_commitment_revoked_fail_backward_exhaustive(false, false, true);
3337         do_test_commitment_revoked_fail_backward_exhaustive(true, false, true);
3338 }
3339
3340 #[test]
3341 fn fail_backward_pending_htlc_upon_channel_failure() {
3342         let chanmon_cfgs = create_chanmon_cfgs(2);
3343         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3344         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3345         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3346         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
3347
3348         // Alice -> Bob: Route a payment but without Bob sending revoke_and_ack.
3349         {
3350                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000);
3351                 nodes[0].node.send_payment_with_route(&route, payment_hash, RecipientOnionFields::secret_only(payment_secret),
3352                         PaymentId(payment_hash.0)).unwrap();
3353                 check_added_monitors!(nodes[0], 1);
3354
3355                 let payment_event = {
3356                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3357                         assert_eq!(events.len(), 1);
3358                         SendEvent::from_event(events.remove(0))
3359                 };
3360                 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
3361                 assert_eq!(payment_event.msgs.len(), 1);
3362         }
3363
3364         // Alice -> Bob: Route another payment but now Alice waits for Bob's earlier revoke_and_ack.
3365         let (route, failed_payment_hash, _, failed_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000);
3366         {
3367                 nodes[0].node.send_payment_with_route(&route, failed_payment_hash,
3368                         RecipientOnionFields::secret_only(failed_payment_secret), PaymentId(failed_payment_hash.0)).unwrap();
3369                 check_added_monitors!(nodes[0], 0);
3370
3371                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3372         }
3373
3374         // Alice <- Bob: Send a malformed update_add_htlc so Alice fails the channel.
3375         {
3376                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 50_000);
3377
3378                 let secp_ctx = Secp256k1::new();
3379                 let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
3380                 let current_height = nodes[1].node.best_block.read().unwrap().height() + 1;
3381                 let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(&route.paths[0], 50_000, &Some(payment_secret), current_height, &None).unwrap();
3382                 let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
3383                 let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
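                     // The onion itself is well-formed; only the 0-msat amount set below is invalid.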
3384
3385                 // Send a 0-msat update_add_htlc to fail the channel.
3386                 let update_add_htlc = msgs::UpdateAddHTLC {
3387                         channel_id: chan.2,
3388                         htlc_id: 0,
3389                         amount_msat: 0,
3390                         payment_hash,
3391                         cltv_expiry,
3392                         onion_routing_packet,
3393                 };
3394                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
3395         }
3396         let events = nodes[0].node.get_and_clear_pending_events();
3397         assert_eq!(events.len(), 3);
3398         // Check that Alice fails backward the pending HTLC from the second payment.
3399         match events[0] {
3400                 Event::PaymentPathFailed { payment_hash, .. } => {
3401                         assert_eq!(payment_hash, failed_payment_hash);
3402                 },
3403                 _ => panic!("Unexpected event"),
3404         }
3405         match events[1] {
3406                 Event::PaymentFailed { payment_hash, .. } => {
3407                         assert_eq!(payment_hash, failed_payment_hash);
3408                 },
3409                 _ => panic!("Unexpected event"),
3410         }
3411         match events[2] {
3412                 Event::ChannelClosed { reason: ClosureReason::ProcessingError { ref err }, .. } => {
3413                         assert_eq!(err, "Remote side tried to send a 0-msat HTLC");
3414                 },
3415                 _ => panic!("Unexpected event {:?}", events[1]),
3416         }
3417         check_closed_broadcast!(nodes[0], true);
3418         check_added_monitors!(nodes[0], 1);
3419 }
3420
3421 #[test]
3422 fn test_htlc_ignore_latest_remote_commitment() {
3423         // Test that HTLC transactions spending the latest remote commitment transaction are simply
3424         // ignored if we cannot claim them. This originally tickled an invalid unwrap().
3425         let chanmon_cfgs = create_chanmon_cfgs(2);
3426         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3427         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3428         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3429         if *nodes[1].connect_style.borrow() == ConnectStyle::FullBlockViaListen {
3430                 // We rely on the ability to connect a block redundantly, which isn't allowed via
3431                 // `chain::Listen`, so we never run the test if we randomly get assigned that
3432                 // connect_style.
3433                 return;
3434         }
3435         create_announced_chan_between_nodes(&nodes, 0, 1);
3436
3437         route_payment(&nodes[0], &[&nodes[1]], 10000000);
3438         nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
3439         connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
3440         check_closed_broadcast!(nodes[0], true);
3441         check_added_monitors!(nodes[0], 1);
3442         check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
3443
3444         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
3445         assert_eq!(node_txn.len(), 3);
3446         assert_eq!(node_txn[0], node_txn[1]);
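             // The commitment transaction appears twice (duplicate broadcasts are harmless);
             // the remaining transaction is the HTLC-timeout claim.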
3447
3448         let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
3449         connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]});
3450         check_closed_broadcast!(nodes[1], true);
3451         check_added_monitors!(nodes[1], 1);
3452         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
3453
3454         // Duplicate the connect_block call since this may happen due to other listeners
3455         // registering new transactions
3456         connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[2].clone()]});
3457 }
3458
3459 #[test]
3460 fn test_force_close_fail_back() {
3461         // Check which HTLCs are failed-backwards on channel force-closure
3462         let chanmon_cfgs = create_chanmon_cfgs(3);
3463         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3464         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3465         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3466         create_announced_chan_between_nodes(&nodes, 0, 1);
3467         create_announced_chan_between_nodes(&nodes, 1, 2);
3468
3469         let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
3470
3471         let mut payment_event = {
3472                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
3473                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
3474                 check_added_monitors!(nodes[0], 1);
3475
3476                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3477                 assert_eq!(events.len(), 1);
3478                 SendEvent::from_event(events.remove(0))
3479         };
3480
3481         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3482         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
3483
3484         expect_pending_htlcs_forwardable!(nodes[1]);
3485
3486         let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
3487         assert_eq!(events_2.len(), 1);
3488         payment_event = SendEvent::from_event(events_2.remove(0));
3489         assert_eq!(payment_event.msgs.len(), 1);
3490
3491         check_added_monitors!(nodes[1], 1);
3492         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
3493         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
3494         check_added_monitors!(nodes[2], 1);
3495         let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3496
3497         // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
3498         // state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
3499         // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
3500
3501         nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
3502         check_closed_broadcast!(nodes[2], true);
3503         check_added_monitors!(nodes[2], 1);
3504         check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
3505         let tx = {
3506                 let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
3507                 // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
3508                 // have a use for it unless nodes[2] learns the preimage somehow, the funds will go
3509                 // back to nodes[1] upon timeout otherwise.
3510                 assert_eq!(node_txn.len(), 1);
3511                 node_txn.remove(0)
3512         };
3513
3514         mine_transaction(&nodes[1], &tx);
3515
3516         // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
3517         check_closed_broadcast!(nodes[1], true);
3518         check_added_monitors!(nodes[1], 1);
3519         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
3520
3521         // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
3522         {
3523                 get_monitor!(nodes[2], payment_event.commitment_msg.channel_id)
3524                         .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger);
3525         }
3526         mine_transaction(&nodes[2], &tx);
3527         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
3528         assert_eq!(node_txn.len(), 1);
3529         assert_eq!(node_txn[0].input.len(), 1);
3530         assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
3531         assert_eq!(node_txn[0].lock_time.0, 0); // Must be an HTLC-Success
3532         assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
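             // (HTLC-Success witness: CHECKMULTISIG dummy element, two signatures, the payment
             // preimage, and the witness script.)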
3533
3534         check_spends!(node_txn[0], tx);
3535 }
3536
3537 #[test]
3538 fn test_dup_events_on_peer_disconnect() {
3539         // Test that if we receive a duplicative update_fulfill_htlc message after a reconnect we do
3540         // not generate a corresponding duplicate PaymentSent event. This was not always the case:
3541         // we used to generate the event immediately upon receipt of the payment preimage in the
3542         // update_fulfill_htlc message.
3543
3544         let chanmon_cfgs = create_chanmon_cfgs(2);
3545         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3546         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3547         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3548         create_announced_chan_between_nodes(&nodes, 0, 1);
3549
3550         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
3551
3552         nodes[1].node.claim_funds(payment_preimage);
3553         expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
3554         check_added_monitors!(nodes[1], 1);
3555         let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3556         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
3557         expect_payment_sent_without_paths!(nodes[0], payment_preimage);
3558
3559         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3560         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3561
3562         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
3563         expect_payment_path_successful!(nodes[0]);
3564 }
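
// A minimal companion sketch (using only harness helpers already exercised in this file;
// the test name is illustrative, not upstream): once a payment has fully completed, a
// disconnect/reconnect cycle with nothing in flight should leave neither side with any
// events to (re-)generate.
#[test]
fn test_no_dup_events_after_quiescent_reconnect() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// Complete a payment end-to-end; the helpers consume all resulting events.
	let (payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);

	// With the channel quiescent, reconnecting should only re-exchange
	// channel_reestablish; no payment events should reappear on either side.
	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
}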
3565
3566 #[test]
3567 fn test_peer_disconnected_before_funding_broadcasted() {
3568         // Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
3569         // before the funding transaction has been broadcasted.
3570         let chanmon_cfgs = create_chanmon_cfgs(2);
3571         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3572         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3573         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3574
3575         // Open a channel between `nodes[0]` and `nodes[1]`, for which the funding transaction is never
3576         // broadcasted, even though it's created by `nodes[0]`.
3577         let expected_temporary_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
3578         let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
3579         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
3580         let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
3581         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
3582
3583         let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
3584         assert_eq!(temporary_channel_id, expected_temporary_channel_id);
3585
3586         assert!(nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
3587
3588         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
3589         assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id);
3590
3591         // Even though the funding transaction is created by `nodes[0]`, the `FundingCreated` msg is
3592         // never sent to `nodes[1]`, and therefore the tx is never signed by either party nor
3593         // broadcasted.
3594         {
3595                 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
3596         }
3597
3598         // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` when the peers are
3599         // disconnected before the funding transaction was broadcasted.
3600         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3601         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3602
3603         check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
3604         check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
3605 }
3606
3607 #[test]
3608 fn test_simple_peer_disconnect() {
3609         // Test that we can reconnect when there are no lost messages
3610         let chanmon_cfgs = create_chanmon_cfgs(3);
3611         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3612         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3613         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3614         create_announced_chan_between_nodes(&nodes, 0, 1);
3615         create_announced_chan_between_nodes(&nodes, 1, 2);
3616
3617         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3618         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3619         reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3620
3621         let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3622         let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3623         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
3624         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);
3625
3626         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3627         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3628         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3629
3630         let (payment_preimage_3, payment_hash_3, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
3631         let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3632         let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3633         let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3634
3635         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3636         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3637
3638         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
3639         fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);
3640
3641         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
3642         {
3643                 let events = nodes[0].node.get_and_clear_pending_events();
3644                 assert_eq!(events.len(), 4);
3645                 match events[0] {
3646                         Event::PaymentSent { payment_preimage, payment_hash, .. } => {
3647                                 assert_eq!(payment_preimage, payment_preimage_3);
3648                                 assert_eq!(payment_hash, payment_hash_3);
3649                         },
3650                         _ => panic!("Unexpected event"),
3651                 }
3652                 match events[1] {
3653                         Event::PaymentPathSuccessful { .. } => {},
3654                         _ => panic!("Unexpected event"),
3655                 }
3656                 match events[2] {
3657                         Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } => {
3658                                 assert_eq!(payment_hash, payment_hash_5);
3659                                 assert!(payment_failed_permanently);
3660                         },
3661                         _ => panic!("Unexpected event"),
3662                 }
3663                 match events[3] {
3664                         Event::PaymentFailed { payment_hash, .. } => {
3665                                 assert_eq!(payment_hash, payment_hash_5);
3666                         },
3667                         _ => panic!("Unexpected event"),
3668                 }
3669         }
3670
3671         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
3672         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
3673 }
3674
3675 fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken_lnd: bool) {
3676         // Test that we can reconnect when in-flight HTLC updates get dropped
3677         let chanmon_cfgs = create_chanmon_cfgs(2);
3678         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3679         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3680         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3681
3682         let mut as_channel_ready = None;
3683         let channel_id = if messages_delivered == 0 {
3684                 let (channel_ready, chan_id, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001);
3685                 as_channel_ready = Some(channel_ready);
3686                 // nodes[1] doesn't receive the channel_ready message (it'll be re-sent on reconnect)
3687                 // Note that we store it so that if we're running with `simulate_broken_lnd` we can deliver
3688                 // it before the channel_reestablish message.
3689                 chan_id
3690         } else {
3691                 create_announced_chan_between_nodes(&nodes, 0, 1).2
3692         };
3693
3694         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000);
3695
3696         let payment_event = {
3697                 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
3698                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
3699                 check_added_monitors!(nodes[0], 1);
3700
3701                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3702                 assert_eq!(events.len(), 1);
3703                 SendEvent::from_event(events.remove(0))
3704         };
3705         assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);
3706
3707         if messages_delivered < 2 {
3708                 // Drop the payment_event messages, and let them get re-generated in reconnect_nodes!
3709         } else {
3710                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3711                 if messages_delivered >= 3 {
3712                         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
3713                         check_added_monitors!(nodes[1], 1);
3714                         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3715
3716                         if messages_delivered >= 4 {
3717                                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
3718                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3719                                 check_added_monitors!(nodes[0], 1);
3720
3721                                 if messages_delivered >= 5 {
3722                                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
3723                                         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
3724                                         // No commitment_signed so get_event_msg's assert(len == 1) passes
3725                                         check_added_monitors!(nodes[0], 1);
3726
3727                                         if messages_delivered >= 6 {
3728                                                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
3729                                                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3730                                                 check_added_monitors!(nodes[1], 1);
3731                                         }
3732                                 }
3733                         }
3734                 }
3735         }
3736
3737         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3738         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3739         if messages_delivered < 3 {
3740                 if simulate_broken_lnd {
3741                 // lnd has a long-standing bug where it sends a channel_ready prior to a
3742                 // channel_reestablish if you reconnect before channel_ready has been exchanged.
3743                         //
3744                         // Here we simulate that behavior, delivering a channel_ready immediately on
3745                         // reconnect. Note that we don't bother skipping the now-duplicate channel_ready sent
3746                         // in `reconnect_nodes` but we currently don't fail based on that.
3747                         //
3748                         // See-also <https://github.com/lightningnetwork/lnd/issues/4006>
3749                         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready.as_ref().unwrap().0);
3750                 }
3751                 // Even if the channel_ready messages get exchanged, as long as nothing further was
3752                 // received on either side, both sides will need to resend them.
3753                 reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 1), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3754         } else if messages_delivered == 3 {
3755                 // nodes[0] still wants its RAA + commitment_signed
3756                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
3757         } else if messages_delivered == 4 {
3758                 // nodes[0] still wants its commitment_signed
3759                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3760         } else if messages_delivered == 5 {
3761                 // nodes[1] still wants its final RAA
3762                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
3763         } else if messages_delivered == 6 {
3764                 // Everything was delivered...
3765                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3766         }
3767
3768         let events_1 = nodes[1].node.get_and_clear_pending_events();
3769         if messages_delivered == 0 {
3770                 assert_eq!(events_1.len(), 2);
3771                 match events_1[0] {
3772                         Event::ChannelReady { .. } => { },
3773                         _ => panic!("Unexpected event"),
3774                 };
3775                 match events_1[1] {
3776                         Event::PendingHTLCsForwardable { .. } => { },
3777                         _ => panic!("Unexpected event"),
3778                 };
3779         } else {
3780                 assert_eq!(events_1.len(), 1);
3781                 match events_1[0] {
3782                         Event::PendingHTLCsForwardable { .. } => { },
3783                         _ => panic!("Unexpected event"),
3784                 };
3785         }
3786
3787         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3788         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3789         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3790
3791         nodes[1].node.process_pending_htlc_forwards();
3792
3793         let events_2 = nodes[1].node.get_and_clear_pending_events();
3794         assert_eq!(events_2.len(), 1);
3795         match events_2[0] {
3796                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
3797                         assert_eq!(payment_hash_1, *payment_hash);
3798                         assert_eq!(amount_msat, 1_000_000);
3799                         assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
3800                         assert_eq!(via_channel_id, Some(channel_id));
3801                         match &purpose {
3802                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
3803                                         assert!(payment_preimage.is_none());
3804                                         assert_eq!(payment_secret_1, *payment_secret);
3805                                 },
3806                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
3807                         }
3808                 },
3809                 _ => panic!("Unexpected event"),
3810         }
3811
3812         nodes[1].node.claim_funds(payment_preimage_1);
3813         check_added_monitors!(nodes[1], 1);
3814         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
3815
3816         let events_3 = nodes[1].node.get_and_clear_pending_msg_events();
3817         assert_eq!(events_3.len(), 1);
3818         let (update_fulfill_htlc, commitment_signed) = match events_3[0] {
3819                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
3820                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
3821                         assert!(updates.update_add_htlcs.is_empty());
3822                         assert!(updates.update_fail_htlcs.is_empty());
3823                         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
3824                         assert!(updates.update_fail_malformed_htlcs.is_empty());
3825                         assert!(updates.update_fee.is_none());
3826                         (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
3827                 },
3828                 _ => panic!("Unexpected event"),
3829         };
3830
3831         if messages_delivered >= 1 {
3832                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc);
3833
3834                 let events_4 = nodes[0].node.get_and_clear_pending_events();
3835                 assert_eq!(events_4.len(), 1);
3836                 match events_4[0] {
3837                         Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
3838                                 assert_eq!(payment_preimage_1, *payment_preimage);
3839                                 assert_eq!(payment_hash_1, *payment_hash);
3840                         },
3841                         _ => panic!("Unexpected event"),
3842                 }
3843
3844                 if messages_delivered >= 2 {
3845                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
3846                         check_added_monitors!(nodes[0], 1);
3847                         let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
3848
3849                         if messages_delivered >= 3 {
3850                                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
3851                                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3852                                 check_added_monitors!(nodes[1], 1);
3853
3854                                 if messages_delivered >= 4 {
3855                                         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed);
3856                                         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
3857                                         // No commitment_signed so get_event_msg's assert(len == 1) passes
3858                                         check_added_monitors!(nodes[1], 1);
3859
3860                                         if messages_delivered >= 5 {
3861                                                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
3862                                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3863                                                 check_added_monitors!(nodes[0], 1);
3864                                         }
3865                                 }
3866                         }
3867                 }
3868         }
3869
3870         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3871         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3872         if messages_delivered < 2 {
3873                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
3874                 if messages_delivered < 1 {
3875                         expect_payment_sent!(nodes[0], payment_preimage_1);
3876                 } else {
3877                         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3878                 }
3879         } else if messages_delivered == 2 {
3880                 // nodes[0] still wants its RAA + commitment_signed
3881                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
3882         } else if messages_delivered == 3 {
3883                 // nodes[0] still wants its commitment_signed
3884                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3885         } else if messages_delivered == 4 {
3886                 // nodes[1] still wants its final RAA
3887                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
3888         } else if messages_delivered == 5 {
3889                 // Everything was delivered...
3890                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3891         }
3892
3893         if messages_delivered == 1 || messages_delivered == 2 {
3894                 expect_payment_path_successful!(nodes[0]);
3895         }
3896         if messages_delivered <= 5 {
3897                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3898                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3899         }
3900         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3901
3902         if messages_delivered > 2 {
3903                 expect_payment_path_successful!(nodes[0]);
3904         }
3905
3906         // Channel should still work fine...
3907         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
3908         let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
3909         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
3910 }
3911
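// For reference, the messages_delivered stages exercised above are: 0 = channel_ready never
// delivered; 1 = channel fully set up, but the HTLC's update_add/commitment_signed dropped;
// 2 = update_add_htlc delivered; 3 = plus the initial commitment_signed; 4 = plus nodes[1]'s
// revoke_and_ack; 5 = plus nodes[1]'s commitment_signed; 6 = everything delivered.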
3912 #[test]
3913 fn test_drop_messages_peer_disconnect_a() {
3914         do_test_drop_messages_peer_disconnect(0, true);
3915         do_test_drop_messages_peer_disconnect(0, false);
3916         do_test_drop_messages_peer_disconnect(1, false);
3917         do_test_drop_messages_peer_disconnect(2, false);
3918 }
3919
3920 #[test]
3921 fn test_drop_messages_peer_disconnect_b() {
3922         do_test_drop_messages_peer_disconnect(3, false);
3923         do_test_drop_messages_peer_disconnect(4, false);
3924         do_test_drop_messages_peer_disconnect(5, false);
3925         do_test_drop_messages_peer_disconnect(6, false);
3926 }
3927
3928 #[test]
3929 fn test_channel_ready_without_best_block_updated() {
3930         // Previously, if we were offline when a funding transaction was locked in, and then we came
3931         // back online, calling best_block_updated once followed by transactions_confirmed, we'd not
3932         // generate a channel_ready until a later best_block_updated. This tests that we generate the
3933         // channel_ready immediately instead.
3934         let chanmon_cfgs = create_chanmon_cfgs(2);
3935         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3936         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3937         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3938         *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
3939
3940         let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
3941
3942         let conf_height = nodes[0].best_block_info().1 + 1;
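        // With this ConnectStyle only best_block_updated is fed in while catching up (a sketch
        // of a node that was offline as the funding confirmed); the funding confirmation itself
        // is then replayed out-of-band via transactions_confirmed below.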
3943         connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
3944         let block_txn = [funding_tx];
3945         let conf_txn: Vec<_> = block_txn.iter().enumerate().collect();
3946         let conf_block_header = nodes[0].get_block_header(conf_height);
3947         nodes[0].node.transactions_confirmed(&conf_block_header, &conf_txn[..], conf_height);
3948
3949         // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
3950         let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
3951         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
3952 }
3953
3954 #[test]
3955 fn test_drop_messages_peer_disconnect_dual_htlc() {
3956         // Test that we can handle reconnecting when both sides of a channel have pending
3957         // commitment_updates when we disconnect.
3958         let chanmon_cfgs = create_chanmon_cfgs(2);
3959         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3960         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3961         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3962         create_announced_chan_between_nodes(&nodes, 0, 1);
3963
3964         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
3965
3966         // Now send a second payment, which will be left pending across the disconnect below
3967         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
3968         nodes[0].node.send_payment_with_route(&route, payment_hash_2,
3969                 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
3970         check_added_monitors!(nodes[0], 1);
3971
3972         let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
3973         assert_eq!(events_1.len(), 1);
3974         match events_1[0] {
3975                 MessageSendEvent::UpdateHTLCs { .. } => {},
3976                 _ => panic!("Unexpected event"),
3977         }
3978
3979         nodes[1].node.claim_funds(payment_preimage_1);
3980         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
3981         check_added_monitors!(nodes[1], 1);
3982
3983         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
3984         assert_eq!(events_2.len(), 1);
3985         match events_2[0] {
3986                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
3987                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
3988                         assert!(update_add_htlcs.is_empty());
3989                         assert_eq!(update_fulfill_htlcs.len(), 1);
3990                         assert!(update_fail_htlcs.is_empty());
3991                         assert!(update_fail_malformed_htlcs.is_empty());
3992                         assert!(update_fee.is_none());
3993
3994                         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
3995                         let events_3 = nodes[0].node.get_and_clear_pending_events();
3996                         assert_eq!(events_3.len(), 1);
3997                         match events_3[0] {
3998                                 Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
3999                                         assert_eq!(*payment_preimage, payment_preimage_1);
4000                                         assert_eq!(*payment_hash, payment_hash_1);
4001                                 },
4002                                 _ => panic!("Unexpected event"),
4003                         }
4004
4005                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
4006                         let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4007                         // No commitment_signed so get_event_msg's assert(len == 1) passes
4008                         check_added_monitors!(nodes[0], 1);
4009                 },
4010                 _ => panic!("Unexpected event"),
4011         }
4012
4013         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4014         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4015
4016         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
4017         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
4018         assert_eq!(reestablish_1.len(), 1);
4019         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
4020         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
4021         assert_eq!(reestablish_2.len(), 1);
4022
4023         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
4024         let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
4025         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
4026         let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
4027
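        // The tuples returned by handle_chan_reestablish_msgs! hold, in order, any re-sent
        // channel_ready, revoke_and_ack, and commitment update, plus the RAACommitmentOrder in
        // which the RAA and commitment_signed should be delivered.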
4028         assert!(as_resp.0.is_none());
4029         assert!(bs_resp.0.is_none());
4030
4031         assert!(bs_resp.1.is_none());
4032         assert!(bs_resp.2.is_none());
4033
4034         assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
4035
4036         assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1);
4037         assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty());
4038         assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
4039         assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
4040         assert!(as_resp.2.as_ref().unwrap().update_fee.is_none());
4041         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]);
4042         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed);
4043         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4044         // No commitment_signed so get_event_msg's assert(len == 1) passes
4045         check_added_monitors!(nodes[1], 1);
4046
4047         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap());
4048         let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4049         assert!(bs_second_commitment_signed.update_add_htlcs.is_empty());
4050         assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty());
4051         assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty());
4052         assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty());
4053         assert!(bs_second_commitment_signed.update_fee.is_none());
4054         check_added_monitors!(nodes[1], 1);
4055
4056         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
4057         let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4058         assert!(as_commitment_signed.update_add_htlcs.is_empty());
4059         assert!(as_commitment_signed.update_fulfill_htlcs.is_empty());
4060         assert!(as_commitment_signed.update_fail_htlcs.is_empty());
4061         assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty());
4062         assert!(as_commitment_signed.update_fee.is_none());
4063         check_added_monitors!(nodes[0], 1);
4064
4065         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed);
4066         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4067         // No commitment_signed so get_event_msg's assert(len == 1) passes
4068         check_added_monitors!(nodes[0], 1);
4069
4070         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed);
4071         let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4072         // No commitment_signed so get_event_msg's assert(len == 1) passes
4073         check_added_monitors!(nodes[1], 1);
4074
4075         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
4076         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4077         check_added_monitors!(nodes[1], 1);
4078
4079         expect_pending_htlcs_forwardable!(nodes[1]);
4080
4081         let events_5 = nodes[1].node.get_and_clear_pending_events();
4082         assert_eq!(events_5.len(), 1);
4083         match events_5[0] {
4084                 Event::PaymentClaimable { ref payment_hash, ref purpose, .. } => {
4085                         assert_eq!(payment_hash_2, *payment_hash);
4086                         match &purpose {
4087                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
4088                                         assert!(payment_preimage.is_none());
4089                                         assert_eq!(payment_secret_2, *payment_secret);
4090                                 },
4091                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
4092                         }
4093                 },
4094                 _ => panic!("Unexpected event"),
4095         }
4096
4097         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
4098         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4099         check_added_monitors!(nodes[0], 1);
4100
4101         expect_payment_path_successful!(nodes[0]);
4102         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
4103 }
4104
4105 fn do_test_htlc_timeout(send_partial_mpp: bool) {
4106         // If the user fails to claim/fail an HTLC within the HTLC CLTV timeout we fail it for them
4107         // to avoid our counterparty failing the channel.
4108         let chanmon_cfgs = create_chanmon_cfgs(2);
4109         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4110         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4111         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4112
4113         create_announced_chan_between_nodes(&nodes, 0, 1);
4114
4115         let our_payment_hash = if send_partial_mpp {
4116                 let (route, our_payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000);
4117                 // Use the test utility test_send_payment_along_path to send the payment with MPP data
4118                 // which indicates there are more HTLCs coming.
4119                 let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
4120                 let payment_id = PaymentId([42; 32]);
4121                 let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(payment_secret), payment_id, &route).unwrap();
4122                 nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
4123                 check_added_monitors!(nodes[0], 1);
4124                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
4125                 assert_eq!(events.len(), 1);
4126                 // Now do the relevant commitment_signed/RAA dances along the path, noting that the final
4127                 // hop should *not* yet generate any PaymentClaimable event(s).
4128                 pass_along_path(&nodes[0], &[&nodes[1]], 100000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
4129                 our_payment_hash
4130         } else {
4131                 route_payment(&nodes[0], &[&nodes[1]], 100000).1
4132         };
4133
4134         let mut block = Block {
4135                 header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
4136                 txdata: vec![],
4137         };
4138         connect_block(&nodes[0], &block);
4139         connect_block(&nodes[1], &block);
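        // Connect blocks until just before the point at which the HTLC is failed back: we fail
        // it CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS blocks ahead of its expiry rather
        // than letting it run down, to avoid our counterparty force-closing on us.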
4140         let block_count = TEST_FINAL_CLTV + CHAN_CONFIRM_DEPTH + 2 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS;
4141         for _ in CHAN_CONFIRM_DEPTH + 2..block_count {
4142                 block.header.prev_blockhash = block.block_hash();
4143                 connect_block(&nodes[0], &block);
4144                 connect_block(&nodes[1], &block);
4145         }
4146
4147         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
4148
4149         check_added_monitors!(nodes[1], 1);
4150         let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4151         assert!(htlc_timeout_updates.update_add_htlcs.is_empty());
4152         assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1);
4153         assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty());
4154         assert!(htlc_timeout_updates.update_fee.is_none());
4155
4156         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]);
4157         commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false);
4158         // 100_000 msat as u64, followed by the height at which we failed back above
4159         let mut expected_failure_data = (100_000 as u64).to_be_bytes().to_vec();
4160         expected_failure_data.extend_from_slice(&(block_count - 1).to_be_bytes());
4161         expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]);
4162 }
4163
4164 #[test]
4165 fn test_htlc_timeout() {
4166         do_test_htlc_timeout(true);
4167         do_test_htlc_timeout(false);
4168 }
4169
4170 fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
4171         // Tests that HTLCs in the holding cell are timed out after the requisite number of blocks.
4172         let chanmon_cfgs = create_chanmon_cfgs(3);
4173         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
4174         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
4175         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
4176         create_announced_chan_between_nodes(&nodes, 0, 1);
4177         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4178
4179         // Make sure all nodes are at the same starting height
4180         connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
4181         connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
4182         connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
4183
4184         // Route a first payment to get the 1 -> 2 channel in awaiting_raa...
4185         let (route, first_payment_hash, _, first_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
4186         nodes[1].node.send_payment_with_route(&route, first_payment_hash,
4187                 RecipientOnionFields::secret_only(first_payment_secret), PaymentId(first_payment_hash.0)).unwrap();
4188         assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
4189         check_added_monitors!(nodes[1], 1);
4190
4191         // Now attempt to route a second payment, which should be placed in the holding cell
4192         let sending_node = if forwarded_htlc { &nodes[0] } else { &nodes[1] };
4193         let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(sending_node, nodes[2], 100000);
4194         sending_node.node.send_payment_with_route(&route, second_payment_hash,
4195                 RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap();
4196         if forwarded_htlc {
4197                 check_added_monitors!(nodes[0], 1);
4198                 let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
4199                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
4200                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
4201                 expect_pending_htlcs_forwardable!(nodes[1]);
4202         }
4203         check_added_monitors!(nodes[1], 0);
4204
4205         connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS);
4206         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4207         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
4208         connect_blocks(&nodes[1], 1);
4209
4210         if forwarded_htlc {
4211                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
4212                 check_added_monitors!(nodes[1], 1);
4213                 let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
4214                 assert_eq!(fail_commit.len(), 1);
4215                 match fail_commit[0] {
4216                         MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, ref commitment_signed, .. }, .. } => {
4217                                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
4218                                 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, true, true);
4219                         },
4220                         _ => unreachable!(),
4221                 }
4222                 expect_payment_failed_with_update!(nodes[0], second_payment_hash, false, chan_2.0.contents.short_channel_id, false);
4223         } else {
4224                 expect_payment_failed!(nodes[1], second_payment_hash, false);
4225         }
4226 }
4227
4228 #[test]
4229 fn test_holding_cell_htlc_add_timeouts() {
4230         do_test_holding_cell_htlc_add_timeouts(false);
4231         do_test_holding_cell_htlc_add_timeouts(true);
4232 }
4233
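// Drains any Event::SpendableOutputs from the node's ChainMonitor and, via the backing
// KeysManager, builds one sweep transaction per descriptor (plus, when several descriptors
// were returned, a single transaction sweeping them all), returning the transactions for
// inspection.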
4234 macro_rules! check_spendable_outputs {
4235         ($node: expr, $keysinterface: expr) => {
4236                 {
4237                         let mut events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events();
4238                         let mut txn = Vec::new();
4239                         let mut all_outputs = Vec::new();
4240                         let secp_ctx = Secp256k1::new();
4241                         for event in events.drain(..) {
4242                                 match event {
4243                                         Event::SpendableOutputs { mut outputs } => {
4244                                                 for outp in outputs.drain(..) {
4245                                                         txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &secp_ctx).unwrap());
4246                                                         all_outputs.push(outp);
4247                                                 }
4248                                         },
4249                                         _ => panic!("Unexpected event"),
4250                                 };
4251                         }
4252                         if all_outputs.len() > 1 {
4253                                 if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs(&all_outputs.iter().map(|a| a).collect::<Vec<_>>(), Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &secp_ctx) {
4254                                         txn.push(tx);
4255                                 }
4256                         }
4257                         txn
4258                 }
4259         }
4260 }
4261
4262 #[test]
4263 fn test_claim_sizeable_push_msat() {
4264         // Incidentally test SpendableOutput event generation due to detection of to_local output on commitment tx
4265         let chanmon_cfgs = create_chanmon_cfgs(2);
4266         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4267         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4268         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4269
4270         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
4271         nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
4272         check_closed_broadcast!(nodes[1], true);
4273         check_added_monitors!(nodes[1], 1);
4274         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
4275         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4276         assert_eq!(node_txn.len(), 1);
4277         check_spends!(node_txn[0], chan.3);
4278         assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
4279
4280         mine_transaction(&nodes[1], &node_txn[0]);
4281         connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
4282
4283         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4284         assert_eq!(spend_txn.len(), 1);
4285         assert_eq!(spend_txn[0].input.len(), 1);
4286         check_spends!(spend_txn[0], node_txn[0]);
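        // The to_local output on our own commitment tx is CSV-locked for the to_self_delay
        // (BREAKDOWN_TIMEOUT in the test config), which the sweep satisfies via nSequence.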
4287         assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
4288 }
4289
4290 #[test]
4291 fn test_claim_on_remote_sizeable_push_msat() {
4292         // Same test as the previous one, but on the remote commitment tx, as per_commitment_point registration changes depending on
4293         // whether you're funder or fundee and the to_remote output is encumbered by a P2WPKH
4294         let chanmon_cfgs = create_chanmon_cfgs(2);
4295         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4296         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4297         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4298
4299         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
4300         nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
4301         check_closed_broadcast!(nodes[0], true);
4302         check_added_monitors!(nodes[0], 1);
4303         check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
4304
4305         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4306         assert_eq!(node_txn.len(), 1);
4307         check_spends!(node_txn[0], chan.3);
4308         assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
4309
4310         mine_transaction(&nodes[1], &node_txn[0]);
4311         check_closed_broadcast!(nodes[1], true);
4312         check_added_monitors!(nodes[1], 1);
4313         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
4314         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4315
4316         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4317         assert_eq!(spend_txn.len(), 1);
4318         check_spends!(spend_txn[0], node_txn[0]);
4319 }
4320
4321 #[test]
4322 fn test_claim_on_remote_revoked_sizeable_push_msat() {
4323         // Same test as the previous one, but on the remote revoked commitment tx, as per_commitment_point registration changes depending on
4324         // whether you're funder or fundee and the to_remote output is encumbered by a P2WPKH
4325
4326         let chanmon_cfgs = create_chanmon_cfgs(2);
4327         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4328         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4329         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4330
4331         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000);
4332         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4333         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2);
4334         assert_eq!(revoked_local_txn[0].input.len(), 1);
4335         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
4336
4337         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4338         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4339         check_closed_broadcast!(nodes[1], true);
4340         check_added_monitors!(nodes[1], 1);
4341         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
4342
4343         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4344         mine_transaction(&nodes[1], &node_txn[0]);
4345         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4346
4347         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4348         assert_eq!(spend_txn.len(), 3);
4349         check_spends!(spend_txn[0], revoked_local_txn[0]); // to_remote output on revoked remote commitment_tx
4350         check_spends!(spend_txn[1], node_txn[0]);
4351         check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[0]); // Both outputs
4352 }
4353
4354 #[test]
4355 fn test_static_spendable_outputs_preimage_tx() {
4356         let chanmon_cfgs = create_chanmon_cfgs(2);
4357         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4358         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4359         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4360
4361         // Create some initial channels
4362         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4363
4364         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
4365
4366         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4367         assert_eq!(commitment_tx[0].input.len(), 1);
4368         assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
4369
4370         // Settle A's commitment tx on B's chain
4371         nodes[1].node.claim_funds(payment_preimage);
4372         expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
4373         check_added_monitors!(nodes[1], 1);
4374         mine_transaction(&nodes[1], &commitment_tx[0]);
4375         check_added_monitors!(nodes[1], 1);
4376         let events = nodes[1].node.get_and_clear_pending_msg_events();
4377         match events[0] {
4378                 MessageSendEvent::UpdateHTLCs { .. } => {},
4379                 _ => panic!("Unexpected event"),
4380         }
4381         match events[1] {
4382                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4383                 _ => panic!("Unexpected event"),
4384         }
4385
4386         // Check B's monitor was able to send back output descriptor event for preimage tx on A's commitment tx
4387         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: preimage tx
4388         assert_eq!(node_txn.len(), 1);
4389         check_spends!(node_txn[0], commitment_tx[0]);
4390         assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
4391
4392         mine_transaction(&nodes[1], &node_txn[0]);
4393         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
4394         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4395
4396         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4397         assert_eq!(spend_txn.len(), 1);
4398         check_spends!(spend_txn[0], node_txn[0]);
4399 }
4400
4401 #[test]
4402 fn test_static_spendable_outputs_timeout_tx() {
4403         let chanmon_cfgs = create_chanmon_cfgs(2);
4404         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4405         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4406         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4407
4408         // Create some initial channels
4409         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4410
4411         // Rebalance the channel a bit by relaying one payment through it ...
4412         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
4413
4414         let (_, our_payment_hash, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000);
4415
4416         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4417         assert_eq!(commitment_tx[0].input.len(), 1);
4418         assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
4419
4420         // Settle A's commitment tx on B's chain
4421         mine_transaction(&nodes[1], &commitment_tx[0]);
4422         check_added_monitors!(nodes[1], 1);
4423         let events = nodes[1].node.get_and_clear_pending_msg_events();
4424         match events[0] {
4425                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4426                 _ => panic!("Unexpected event"),
4427         }
4428         connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
4429
4430         // Check B's monitor was able to send back output descriptor event for timeout tx on A's commitment tx
4431         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4432         assert_eq!(node_txn.len(), 1); // ChannelMonitor: timeout tx
4433         check_spends!(node_txn[0], commitment_tx[0].clone());
4434         assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4435
4436         mine_transaction(&nodes[1], &node_txn[0]);
4437         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
4438         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4439         expect_payment_failed!(nodes[1], our_payment_hash, false);
4440
4441         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4442         assert_eq!(spend_txn.len(), 3); // SpendableOutput: remote_commitment_tx.to_remote and timeout_tx.output, plus one tx sweeping both
4443         check_spends!(spend_txn[0], commitment_tx[0]);
4444         check_spends!(spend_txn[1], node_txn[0]);
4445         check_spends!(spend_txn[2], node_txn[0], commitment_tx[0]); // All outputs
4446 }
4447
4448 #[test]
4449 fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
4450         let chanmon_cfgs = create_chanmon_cfgs(2);
4451         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4452         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4453         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4454
4455         // Create some initial channels
4456         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4457
4458         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4459         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
4460         assert_eq!(revoked_local_txn[0].input.len(), 1);
4461         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4462
4463         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4464
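	// The commitment tx captured above has been revoked by the state update in claim_payment,
	// so broadcasting it now lets B construct a justice tx against it.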
4465         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4466         check_closed_broadcast!(nodes[1], true);
4467         check_added_monitors!(nodes[1], 1);
4468         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
4469
4470         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4471         assert_eq!(node_txn.len(), 1);
4472         assert_eq!(node_txn[0].input.len(), 2);
4473         check_spends!(node_txn[0], revoked_local_txn[0]);
4474
4475         mine_transaction(&nodes[1], &node_txn[0]);
4476         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4477
4478         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4479         assert_eq!(spend_txn.len(), 1);
4480         check_spends!(spend_txn[0], node_txn[0]);
4481 }
4482
4483 #[test]
4484 fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
4485         let mut chanmon_cfgs = create_chanmon_cfgs(2);
4486         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
4487         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4488         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4489         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4490
4491         // Create some initial channels
4492         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4493
4494         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4495         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
4496         assert_eq!(revoked_local_txn[0].input.len(), 1);
4497         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4498
4499         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4500
4501         // A will generate HTLC-Timeout from revoked commitment tx
4502         mine_transaction(&nodes[0], &revoked_local_txn[0]);
4503         check_closed_broadcast!(nodes[0], true);
4504         check_added_monitors!(nodes[0], 1);
4505         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
4506         connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
4507
4508         let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4509         assert_eq!(revoked_htlc_txn.len(), 1);
4510         assert_eq!(revoked_htlc_txn[0].input.len(), 1);
4511         assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
4512         check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
4513         assert_ne!(revoked_htlc_txn[0].lock_time.0, 0); // HTLC-Timeout
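	// Per BOLT 3, pre-signed HTLC-Timeout txs are locktime-encumbered until the HTLC's
	// cltv_expiry, whereas HTLC-Success txs carry a locktime of 0, hence the non-zero check.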
4514
4515         // B will generate justice tx from A's revoked commitment/HTLC tx
4516         let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
4517         connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] });
4518         check_closed_broadcast!(nodes[1], true);
4519         check_added_monitors!(nodes[1], 1);
4520         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
4521
4522         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4523         assert_eq!(node_txn.len(), 2); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs
4524         // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
4525         // including the one already spent by revoked_htlc_txn[0]. That's OK, we'll spend with valid
4526         // transactions next...
4527         assert_eq!(node_txn[0].input.len(), 3);
4528         check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);
4529
4530         assert_eq!(node_txn[1].input.len(), 2);
4531         check_spends!(node_txn[1], revoked_local_txn[0], revoked_htlc_txn[0]);
4532         if node_txn[1].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
4533                 assert_ne!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4534         } else {
4535                 assert_eq!(node_txn[1].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
4536                 assert_ne!(node_txn[1].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4537         }
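	// Whichever input ordering the justice tx uses, it must spend the HTLC-Timeout tx's output
	// plus the one commitment output not already consumed by the HTLC-Timeout tx itself.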
4538
4539         mine_transaction(&nodes[1], &node_txn[1]);
4540         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4541
4542         // Check B's ChannelMonitor was able to generate the right spendable output descriptor
4543         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4544         assert_eq!(spend_txn.len(), 1);
4545         assert_eq!(spend_txn[0].input.len(), 1);
4546         check_spends!(spend_txn[0], node_txn[1]);
4547 }
4548
4549 #[test]
4550 fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
4551         let mut chanmon_cfgs = create_chanmon_cfgs(2);
4552         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
4553         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4554         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4555         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4556
4557         // Create some initial channels
4558         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4559
4560         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4561         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
4562         assert_eq!(revoked_local_txn[0].input.len(), 1);
4563         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4564
4565         // The to-be-revoked commitment tx should have one HTLC and one to_remote output
4566         assert_eq!(revoked_local_txn[0].output.len(), 2);
4567
4568         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4569
4570         // B will generate HTLC-Success from revoked commitment tx
4571         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4572         check_closed_broadcast!(nodes[1], true);
4573         check_added_monitors!(nodes[1], 1);
4574         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
4575         let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4576
4577         assert_eq!(revoked_htlc_txn.len(), 1);
4578         assert_eq!(revoked_htlc_txn[0].input.len(), 1);
4579         assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4580         check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
4581
4582         // Check that the unspent (of the two) output on revoked_local_txn[0] is a P2WPKH:
4583         let unspent_local_txn_output = revoked_htlc_txn[0].input[0].previous_output.vout as usize ^ 1;
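	// (with exactly two outputs, XOR-ing the claimed input's vout with 1 yields the index of
	// the other output)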
4584         assert_eq!(revoked_local_txn[0].output[unspent_local_txn_output].script_pubkey.len(), 2 + 20); // P2WPKH
4585
4586         // A will generate justice tx from B's revoked commitment/HTLC tx
4587         let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
4588         connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] });
4589         check_closed_broadcast!(nodes[0], true);
4590         check_added_monitors!(nodes[0], 1);
4591         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
4592
4593         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4594         assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success
4595
4596         // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
4597         // including the one already spent by revoked_htlc_txn[0]. That's OK, we'll spend with valid
4598         // transactions next...
4599         assert_eq!(node_txn[0].input.len(), 2);
4600         check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);
4601         if node_txn[0].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
4602                 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4603         } else {
4604                 assert_eq!(node_txn[0].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
4605                 assert_eq!(node_txn[0].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4606         }
4607
4608         assert_eq!(node_txn[1].input.len(), 1);
4609         check_spends!(node_txn[1], revoked_htlc_txn[0]);
4610
4611         mine_transaction(&nodes[0], &node_txn[1]);
4612         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
4613
4614         // Note that nodes[0]'s tx_broadcaster is still locked, so if we get here the channelmonitor
4615         // didn't try to generate any new transactions.
4616
4617         // Check A's ChannelMonitor was able to generate the right spendable output descriptor
4618         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
4619         assert_eq!(spend_txn.len(), 3);
4620         assert_eq!(spend_txn[0].input.len(), 1);
4621         check_spends!(spend_txn[0], revoked_local_txn[0]); // spending to_remote output from revoked local tx
4622         assert_ne!(spend_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4623         check_spends!(spend_txn[1], node_txn[1]); // spending justice tx output on the htlc success tx
4624         check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[1]); // Both outputs
4625 }
4626
4627 #[test]
4628 fn test_onchain_to_onchain_claim() {
4629         // Test that in case of channel closure, we detect the state of the output and claim the
4630         // HTLC on the downstream peer's remote commitment tx.
4631         // First, have C claim an HTLC against its own latest commitment transaction.
4632         // Then, broadcast these to B, which should update the monitor downstream on the A<->B
4633         // channel.
4634         // Finally, check that B will claim the HTLC output if A's latest commitment transaction
4635         // gets broadcast.
4636
4637         let chanmon_cfgs = create_chanmon_cfgs(3);
4638         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
4639         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
4640         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
4641
4642         // Create some initial channels
4643         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4644         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4645
4646         // Ensure all nodes are at the same height
4647         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
4648         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
4649         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
4650         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
4651
4652         // Rebalance the network a bit by relaying one payment through all the channels ...
4653         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
4654         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
4655
4656         let (payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
4657         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
4658         check_spends!(commitment_tx[0], chan_2.3);
4659         nodes[2].node.claim_funds(payment_preimage);
4660         expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
4661         check_added_monitors!(nodes[2], 1);
4662         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
4663         assert!(updates.update_add_htlcs.is_empty());
4664         assert!(updates.update_fail_htlcs.is_empty());
4665         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
4666         assert!(updates.update_fail_malformed_htlcs.is_empty());
4667
4668         mine_transaction(&nodes[2], &commitment_tx[0]);
4669         check_closed_broadcast!(nodes[2], true);
4670         check_added_monitors!(nodes[2], 1);
4671         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
4672
4673         let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx)
4674         assert_eq!(c_txn.len(), 1);
4675         check_spends!(c_txn[0], commitment_tx[0]);
4676         assert_eq!(c_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4677         assert!(c_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
4678         assert_eq!(c_txn[0].lock_time.0, 0); // Success tx
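	// C's claim is the pre-signed HTLC-Success tx spending its own commitment tx; its
	// revokeable P2WSH output and zero locktime, checked above, confirm this.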
4679
4680         // Broadcast C's commitment tx and HTLC-Success tx on B's chain; B should be able to extract the preimage and update the downstream monitor
4681         let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
4682         connect_block(&nodes[1], &Block { header, txdata: vec![commitment_tx[0].clone(), c_txn[0].clone()]});
4683         check_added_monitors!(nodes[1], 1);
4684         let events = nodes[1].node.get_and_clear_pending_events();
4685         assert_eq!(events.len(), 2);
4686         match events[0] {
4687                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
4688                 _ => panic!("Unexpected event"),
4689         }
4690         match events[1] {
4691                 Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
4692                         assert_eq!(fee_earned_msat, Some(1000));
4693                         assert_eq!(prev_channel_id, Some(chan_1.2));
4694                         assert_eq!(claim_from_onchain_tx, true);
4695                         assert_eq!(next_channel_id, Some(chan_2.2));
4696                         assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
4697                 },
4698                 _ => panic!("Unexpected event"),
4699         }
4700         check_added_monitors!(nodes[1], 1);
4701         let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events();
4702         assert_eq!(msg_events.len(), 3);
4703         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut msg_events);
4704         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut msg_events);
4705
4706         match nodes_2_event {
4707                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
4708                 _ => panic!("Unexpected event"),
4709         }
4710
4711         match nodes_0_event {
4712                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
4713                         assert!(update_add_htlcs.is_empty());
4714                         assert!(update_fail_htlcs.is_empty());
4715                         assert_eq!(update_fulfill_htlcs.len(), 1);
4716                         assert!(update_fail_malformed_htlcs.is_empty());
4717                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
4718                 },
4719                 _ => panic!("Unexpected event"),
4720         };
4721
4722         // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
4723         match msg_events[0] {
4724                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4725                 _ => panic!("Unexpected event"),
4726         }
4727
4728         // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
4729         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4730         mine_transaction(&nodes[1], &commitment_tx[0]);
4731         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
4732         let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4733         // ChannelMonitor: HTLC-Success tx
4734         assert_eq!(b_txn.len(), 1);
4735         check_spends!(b_txn[0], commitment_tx[0]);
4736         assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
4737         assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
4738         assert_eq!(b_txn[0].lock_time.0, nodes[1].best_block_info().1 + 1); // Success tx
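	// Unlike the pre-signed HTLC-Success tx above, this claim spends A's commitment tx
	// directly, so the monitor builds it itself and sets a near-tip locktime, presumably to
	// discourage fee-sniping.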
4739
4740         check_closed_broadcast!(nodes[1], true);
4741         check_added_monitors!(nodes[1], 1);
4742 }
4743
4744 #[test]
4745 fn test_duplicate_payment_hash_one_failure_one_success() {
4746         // Topology : A --> B --> C --> D
4747         // We route two payments with the same hash between B and C; one will time out while the other is successfully claimed.
4748         // Note that because C will refuse to generate two payment secrets for the same payment hash,
4749         // we forward one of the payments onwards to D.
4750         let chanmon_cfgs = create_chanmon_cfgs(4);
4751         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
4752         // When this test was written, the default base fee floated based on the HTLC count.
4753         // It is now fixed, so we simply set the fee to the expected value here.
4754         let mut config = test_default_channel_config();
4755         config.channel_config.forwarding_fee_base_msat = 196;
4756         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs,
4757                 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
4758         let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
4759
4760         create_announced_chan_between_nodes(&nodes, 0, 1);
4761         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4762         create_announced_chan_between_nodes(&nodes, 2, 3);
4763
4764         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
4765         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
4766         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
4767         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
4768         connect_blocks(&nodes[3], node_max_height - nodes[3].best_block_info().1);
4769
4770         let (our_payment_preimage, duplicate_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000);
4771
4772         let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, None).unwrap();
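	// create_inbound_payment_for_hash registers a payment secret for the externally supplied
	// hash, letting us re-use the hash of the first payment for this second, D-bound one.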
4773         // We reduce the final CLTV here by a somewhat arbitrary constant to keep it under the one-byte
4774         // script push size limit so that the below script length checks match
4775         // ACCEPTED_HTLC_SCRIPT_WEIGHT.
4776         let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV - 40)
4777                 .with_features(nodes[3].node.invoice_features());
4778         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, 800_000, TEST_FINAL_CLTV - 40);
4779         send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[2], &nodes[3]]], 800_000, duplicate_payment_hash, payment_secret);
4780
4781         let commitment_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
4782         assert_eq!(commitment_txn[0].input.len(), 1);
4783         check_spends!(commitment_txn[0], chan_2.3);
4784
4785         mine_transaction(&nodes[1], &commitment_txn[0]);
4786         check_closed_broadcast!(nodes[1], true);
4787         check_added_monitors!(nodes[1], 1);
4788         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
4789         connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires
4790
4791         let htlc_timeout_tx;
4792         { // Extract one of the two HTLC-Timeout transactions
4793                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
4794                 // ChannelMonitor: timeout tx * 2-or-3
4795                 assert!(node_txn.len() == 2 || node_txn.len() == 3);
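		// When three transactions appear, two of them are conflicting claims of the same
		// HTLC output (checked below); with two, each claims a distinct output.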
4796
4797                 check_spends!(node_txn[0], commitment_txn[0]);
4798                 assert_eq!(node_txn[0].input.len(), 1);
4799                 assert_eq!(node_txn[0].output.len(), 1);
4800
4801                 if node_txn.len() > 2 {
4802                         check_spends!(node_txn[1], commitment_txn[0]);
4803                         assert_eq!(node_txn[1].input.len(), 1);
4804                         assert_eq!(node_txn[1].output.len(), 1);
4805                         assert_eq!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
4806
4807                         check_spends!(node_txn[2], commitment_txn[0]);
4808                         assert_eq!(node_txn[2].input.len(), 1);
4809                         assert_eq!(node_txn[2].output.len(), 1);
4810                         assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
4811                 } else {
4812                         check_spends!(node_txn[1], commitment_txn[0]);
4813                         assert_eq!(node_txn[1].input.len(), 1);
4814                         assert_eq!(node_txn[1].output.len(), 1);
4815                         assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
4816                 }
4817
4818                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4819                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4820                 // Assign htlc_timeout_tx to the forwarded HTLC (with value ~800 sats). The received HTLC
4821                 // (with value 900 sats) will be claimed in the below `claim_funds` call.
4822                 if node_txn.len() > 2 {
4823                         assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4824                         htlc_timeout_tx = if node_txn[2].output[0].value < 900 { node_txn[2].clone() } else { node_txn[0].clone() };
4825                 } else {
4826                         htlc_timeout_tx = if node_txn[0].output[0].value < 900 { node_txn[1].clone() } else { node_txn[0].clone() };
4827                 }
4828         }
4829
4830         nodes[2].node.claim_funds(our_payment_preimage);
4831         expect_payment_claimed!(nodes[2], duplicate_payment_hash, 900_000);
4832
4833         mine_transaction(&nodes[2], &commitment_txn[0]);
4834         check_added_monitors!(nodes[2], 2);
4835         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
4836         let events = nodes[2].node.get_and_clear_pending_msg_events();
4837         match events[0] {
4838                 MessageSendEvent::UpdateHTLCs { .. } => {},
4839                 _ => panic!("Unexpected event"),
4840         }
4841         match events[1] {
4842                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4843                 _ => panic!("Unexpected event"),
4844         }
4845         let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4846         assert_eq!(htlc_success_txn.len(), 2); // ChannelMonitor: HTLC-Success txn (*2 due to 2-HTLC outputs)
4847         check_spends!(htlc_success_txn[0], commitment_txn[0]);
4848         check_spends!(htlc_success_txn[1], commitment_txn[0]);
4849         assert_eq!(htlc_success_txn[0].input.len(), 1);
4850         assert_eq!(htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4851         assert_eq!(htlc_success_txn[1].input.len(), 1);
4852         assert_eq!(htlc_success_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4853         assert_ne!(htlc_success_txn[0].input[0].previous_output, htlc_success_txn[1].input[0].previous_output);
4854         assert_ne!(htlc_success_txn[1].input[0].previous_output, htlc_timeout_tx.input[0].previous_output);
4855
4856         mine_transaction(&nodes[1], &htlc_timeout_tx);
4857         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4858         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
4859         let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4860         assert!(htlc_updates.update_add_htlcs.is_empty());
4861         assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
4862         let first_htlc_id = htlc_updates.update_fail_htlcs[0].htlc_id;
4863         assert!(htlc_updates.update_fulfill_htlcs.is_empty());
4864         assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
4865         check_added_monitors!(nodes[1], 1);
4866
4867         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
4868         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4869         {
4870                 commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true);
4871         }
4872         expect_payment_failed_with_update!(nodes[0], duplicate_payment_hash, false, chan_2.0.contents.short_channel_id, true);
4873
4874         // Settle the 2nd HTLC by broadcasting C's HTLC-Success tx on B's chain
4875         mine_transaction(&nodes[1], &htlc_success_txn[1]);
4876         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(196), true, true);
4877         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4878         assert!(updates.update_add_htlcs.is_empty());
4879         assert!(updates.update_fail_htlcs.is_empty());
4880         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
4881         assert_ne!(updates.update_fulfill_htlcs[0].htlc_id, first_htlc_id);
4882         assert!(updates.update_fail_malformed_htlcs.is_empty());
4883         check_added_monitors!(nodes[1], 1);
4884
4885         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
4886         commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false);
4887
4888         let events = nodes[0].node.get_and_clear_pending_events();
4889         match events[0] {
4890                 Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
4891                         assert_eq!(*payment_preimage, our_payment_preimage);
4892                         assert_eq!(*payment_hash, duplicate_payment_hash);
4893                 }
4894                 _ => panic!("Unexpected event"),
4895         }
4896 }
4897
4898 #[test]
4899 fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
4900         let chanmon_cfgs = create_chanmon_cfgs(2);
4901         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4902         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4903         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4904
4905         // Create some initial channels
4906         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4907
4908         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
4909         let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
4910         assert_eq!(local_txn.len(), 1);
4911         assert_eq!(local_txn[0].input.len(), 1);
4912         check_spends!(local_txn[0], chan_1.3);
4913
4914         // Give B knowledge of the preimage so it can generate a local HTLC-Success tx
4915         nodes[1].node.claim_funds(payment_preimage);
4916         expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
4917         check_added_monitors!(nodes[1], 1);
4918
4919         mine_transaction(&nodes[1], &local_txn[0]);
4920         check_added_monitors!(nodes[1], 1);
4921         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
4922         let events = nodes[1].node.get_and_clear_pending_msg_events();
4923         match events[0] {
4924                 MessageSendEvent::UpdateHTLCs { .. } => {},
4925                 _ => panic!("Unexpected event"),
4926         }
4927         match events[1] {
4928                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4929                 _ => panic!("Unexpected event"),
4930         }
4931         let node_tx = {
4932                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
4933                 assert_eq!(node_txn.len(), 1);
4934                 assert_eq!(node_txn[0].input.len(), 1);
4935                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4936                 check_spends!(node_txn[0], local_txn[0]);
4937                 node_txn[0].clone()
4938         };
4939
4940         mine_transaction(&nodes[1], &node_tx);
4941         connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
4942
4943         // Verify that B is able to spend its own HTLC-Success tx thanks to the spendable output event given back by its ChannelMonitor
4944         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4945         assert_eq!(spend_txn.len(), 1);
4946         assert_eq!(spend_txn[0].input.len(), 1);
4947         check_spends!(spend_txn[0], node_tx);
4948         assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
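	// The nSequence encodes the relative to_self_delay (BREAKDOWN_TIMEOUT here), which must
	// elapse before B may sweep the output of its own HTLC-Success tx.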
4949 }
4950
4951 fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, announce_latest: bool) {
4952         // Test that we fail backwards the full set of HTLCs we need to when the remote broadcasts an
4953         // unrevoked commitment transaction.
4954         // This includes HTLCs which were below the dust threshold as well as HTLCs which were awaiting
4955         // a remote RAA before they could be failed backwards (and combinations thereof).
4956         // We also test duplicate-hash HTLCs by adding two nodes on each side of the target nodes which
4957         // use the same payment hashes.
4958         // Thus, we use a six-node network:
4959         //
4960         // A \         / E
4961         //    - C - D -
4962         // B /         \ F
4963         // And test where C fails back to A/B when D announces its latest commitment transaction
4964         let chanmon_cfgs = create_chanmon_cfgs(6);
4965         let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
4966         // When this test was written, the default base fee floated based on the HTLC count.
4967         // It is now fixed, so we simply set the fee to the expected value here.
4968         let mut config = test_default_channel_config();
4969         config.channel_config.forwarding_fee_base_msat = 196;
4970         let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs,
4971                 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
4972         let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
4973
4974         let _chan_0_2 = create_announced_chan_between_nodes(&nodes, 0, 2);
4975         let _chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4976         let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
4977         let chan_3_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
4978         let chan_3_5 = create_announced_chan_between_nodes(&nodes, 3, 5);
4979
4980         // Rebalance and check output sanity...
4981         send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
4982         send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000);
4983         assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
4984
4985         let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
4986                 .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().holder_dust_limit_satoshis;
4987         // 0th HTLC:
4988         let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
4989         // 1st HTLC:
4990         let (_, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
4991         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
4992         // 2nd HTLC:
4993         send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
4994         // 3rd HTLC:
4995         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
4996         // 4th HTLC:
4997         let (_, payment_hash_3, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
4998         // 5th HTLC:
4999         let (_, payment_hash_4, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5000         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
5001         // 6th HTLC:
5002         send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200, None).unwrap());
5003         // 7th HTLC:
5004         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200, None).unwrap());
5005
5006         // 8th HTLC:
5007         let (_, payment_hash_5, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5008         // 9th HTLC:
5009         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
5010         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5011
5012         // 10th HTLC:
5013         let (_, payment_hash_6, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5014         // 11th HTLC:
5015         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
5016         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200, None).unwrap());
5017
5018         // Double-check that six of the new HTLCs were added
5019         // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie,
5020         // with the to_local and to_remote outputs the commitment tx has 8 outputs; the six below-dust HTLCs are not included).
5021         assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2).len(), 1);
5022         assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 8);
5023
5024         // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
5025         // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
5026         nodes[4].node.fail_htlc_backwards(&payment_hash_1);
5027         nodes[4].node.fail_htlc_backwards(&payment_hash_3);
5028         nodes[4].node.fail_htlc_backwards(&payment_hash_5);
5029         nodes[4].node.fail_htlc_backwards(&payment_hash_6);
5030         check_added_monitors!(nodes[4], 0);
5031
5032         let failed_destinations = vec![
5033                 HTLCDestination::FailedPayment { payment_hash: payment_hash_1 },
5034                 HTLCDestination::FailedPayment { payment_hash: payment_hash_3 },
5035                 HTLCDestination::FailedPayment { payment_hash: payment_hash_5 },
5036                 HTLCDestination::FailedPayment { payment_hash: payment_hash_6 },
5037         ];
5038         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations);
5039         check_added_monitors!(nodes[4], 1);
5040
5041         let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
5042         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[0]);
5043         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[1]);
5044         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[2]);
5045         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[3]);
5046         commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false);
5047
5048         // Fail 3rd below-dust and 7th above-dust HTLCs
5049         nodes[5].node.fail_htlc_backwards(&payment_hash_2);
5050         nodes[5].node.fail_htlc_backwards(&payment_hash_4);
5051         check_added_monitors!(nodes[5], 0);
5052
5053         let failed_destinations_2 = vec![
5054                 HTLCDestination::FailedPayment { payment_hash: payment_hash_2 },
5055                 HTLCDestination::FailedPayment { payment_hash: payment_hash_4 },
5056         ];
5057         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2);
5058         check_added_monitors!(nodes[5], 1);
5059
5060         let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
5061         nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[0]);
5062         nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]);
5063         commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
5064
5065         let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
5066
5067         // After the 4 removes above from nodes[4] and the 2 from nodes[5], nodes[3] should receive 6 HTLCHandlingFailed events
5068         let failed_destinations_3 = vec![
5069                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5070                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5071                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5072                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5073                 HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
5074                 HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
5075         ];
5076         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3);
5077         check_added_monitors!(nodes[3], 1);
5078         let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
5079         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
5080         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[1]);
5081         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[2]);
5082         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[3]);
5083         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[4]);
5084         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[5]);
5085         if deliver_last_raa {
5086                 commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false);
5087         } else {
5088                 let _cs_last_raa = commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false, true, false, true);
5089         }
5090
5091         // D's latest commitment transaction now contains 1st + 2nd + 9th HTLCs (implicitly, they're
5092         // below the dust limit) and the 5th + 6th + 11th HTLCs. It has failed back the 0th, 3rd, 4th,
5093         // 7th, 8th, and 10th, but as we haven't yet delivered the final RAA to C, the fails haven't
5094         // propagated back to A/B yet (and D has two unrevoked commitment transactions).
5095         //
5096         // We now broadcast the latest commitment transaction, which *should* result in failures for
5097         // the 0th, 1st, 2nd, 3rd, 4th, 7th, 8th, 9th, and 10th HTLCs, ie all the below-dust HTLCs and
5098         // the non-broadcast above-dust HTLCs.
5099         //
5100         // Alternatively, we may broadcast the previous commitment transaction, which should only
5101         // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
5102         let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
5103
5104         if announce_latest {
5105                 mine_transaction(&nodes[2], &ds_last_commitment_tx[0]);
5106         } else {
5107                 mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
5108         }
5109         let events = nodes[2].node.get_and_clear_pending_events();
5110         let close_event = if deliver_last_raa {
5111                 assert_eq!(events.len(), 2 + 6);
5112                 events.last().clone().unwrap()
5113         } else {
5114                 assert_eq!(events.len(), 1);
5115                 events.last().clone().unwrap()
5116         };
5117         match close_event {
5118                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
5119                 _ => panic!("Unexpected event"),
5120         }
5121
5122         connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
5123         check_closed_broadcast!(nodes[2], true);
5124         if deliver_last_raa {
5125                 expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
5126
5127                 let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
5128                 expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
5129         } else {
5130                 let expected_destinations: Vec<HTLCDestination> = if announce_latest {
5131                         repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect()
5132                 } else {
5133                         repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect()
5134                 };
5135
5136                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations);
5137         }
5138         check_added_monitors!(nodes[2], 3);
5139
5140         let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
5141         assert_eq!(cs_msgs.len(), 2);
5142         let mut a_done = false;
5143         for msg in cs_msgs {
5144                 match msg {
5145                         MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
5146                                 // Both under-dust HTLCs and the one above-dust HTLC that we had already failed
5147                                 // should be failed-backwards here.
5148                                 let target = if *node_id == nodes[0].node.get_our_node_id() {
5149                                         // If announce_latest, expect 0th, 1st, 4th, 8th, 10th HTLCs, else only 0th, 1st, 10th below-dust HTLCs
5150                                         for htlc in &updates.update_fail_htlcs {
5151                                                 assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 6 || if announce_latest { htlc.htlc_id == 3 || htlc.htlc_id == 5 } else { false });
5152                                         }
5153                                         assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 5 } else { 3 });
5154                                         assert!(!a_done);
5155                                         a_done = true;
5156                                         &nodes[0]
5157                                 } else {
5158                                         // If announce_latest, expect 2nd, 3rd, 7th, 9th HTLCs, else only 2nd, 3rd, 9th below-dust HTLCs
5159                                         for htlc in &updates.update_fail_htlcs {
5160                                                 assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 5 || if announce_latest { htlc.htlc_id == 4 } else { false });
5161                                         }
5162                                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
5163                                         assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 4 } else { 3 });
5164                                         &nodes[1]
5165                                 };
5166                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
5167                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[1]);
5168                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[2]);
5169                                 if announce_latest {
5170                                         target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[3]);
5171                                         if *node_id == nodes[0].node.get_our_node_id() {
5172                                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[4]);
5173                                         }
5174                                 }
5175                                 commitment_signed_dance!(target, nodes[2], updates.commitment_signed, false, true);
5176                         },
5177                         _ => panic!("Unexpected event"),
5178                 }
5179         }
5180
5181         let as_events = nodes[0].node.get_and_clear_pending_events();
5182         assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 });
5183         let mut as_failds = HashSet::new();
5184         let mut as_updates = 0;
5185         for event in as_events.iter() {
5186                 if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
5187                         assert!(as_failds.insert(*payment_hash));
5188                         if *payment_hash != payment_hash_2 {
5189                                 assert_eq!(*payment_failed_permanently, deliver_last_raa);
5190                         } else {
5191                                 assert!(!payment_failed_permanently);
5192                         }
5193                         if let PathFailure::OnPath { network_update: Some(_) } = failure {
5194                                 as_updates += 1;
5195                         }
5196                 } else if let &Event::PaymentFailed { .. } = event {
5197                 } else { panic!("Unexpected event"); }
5198         }
5199         assert!(as_failds.contains(&payment_hash_1));
5200         assert!(as_failds.contains(&payment_hash_2));
5201         if announce_latest {
5202                 assert!(as_failds.contains(&payment_hash_3));
5203                 assert!(as_failds.contains(&payment_hash_5));
5204         }
5205         assert!(as_failds.contains(&payment_hash_6));
5206
5207         let bs_events = nodes[1].node.get_and_clear_pending_events();
5208         assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 });
5209         let mut bs_failds = HashSet::new();
5210         let mut bs_updates = 0;
5211         for event in bs_events.iter() {
5212                 if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
5213                         assert!(bs_failds.insert(*payment_hash));
5214                         if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
5215                                 assert_eq!(*payment_failed_permanently, deliver_last_raa);
5216                         } else {
5217                                 assert!(!payment_failed_permanently);
5218                         }
5219                         if let PathFailure::OnPath { network_update: Some(_) } = failure {
5220                                 bs_updates += 1;
5221                         }
5222                 } else if let &Event::PaymentFailed { .. } = event {
5223                 } else { panic!("Unexpected event"); }
5224         }
5225         assert!(bs_failds.contains(&payment_hash_1));
5226         assert!(bs_failds.contains(&payment_hash_2));
5227         if announce_latest {
5228                 assert!(bs_failds.contains(&payment_hash_4));
5229         }
5230         assert!(bs_failds.contains(&payment_hash_5));
5231
5232         // For each HTLC which was not failed-back by normal process (ie deliver_last_raa), we should
5233         // get a NetworkUpdate. A should have gotten 4 HTLCs which were failed-back due to
5234         // unknown-preimage-etc, B should have gotten 2. Thus, in the
5235         // announce_latest && deliver_last_raa case, we should have 5-4=1 and 4-2=2 NetworkUpdates.
5236         assert_eq!(as_updates, if deliver_last_raa { 1 } else if !announce_latest { 3 } else { 5 });
5237         assert_eq!(bs_updates, if deliver_last_raa { 2 } else if !announce_latest { 3 } else { 4 });
5238 }
5239
5240 #[test]
5241 fn test_fail_backwards_latest_remote_announce_a() {
5242         do_test_fail_backwards_unrevoked_remote_announce(false, true);
5243 }
5244
5245 #[test]
5246 fn test_fail_backwards_latest_remote_announce_b() {
5247         do_test_fail_backwards_unrevoked_remote_announce(true, true);
5248 }
5249
5250 #[test]
5251 fn test_fail_backwards_previous_remote_announce() {
5252         do_test_fail_backwards_unrevoked_remote_announce(false, false);
5253         // Note that true, true doesn't make sense as it implies we announce a revoked state, which is
5254         // tested for in test_commitment_revoked_fail_backward_exhaustive()
5255 }
5256
5257 #[test]
5258 fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
5259         let chanmon_cfgs = create_chanmon_cfgs(2);
5260         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5261         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5262         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5263
5264         // Create some initial channels
5265         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5266
5267         let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
5268         let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
5269         assert_eq!(local_txn[0].input.len(), 1);
5270         check_spends!(local_txn[0], chan_1.3);
5271
5272         // Time out the HTLC on A's chain so it can generate an HTLC-Timeout tx
5273         mine_transaction(&nodes[0], &local_txn[0]);
5274         check_closed_broadcast!(nodes[0], true);
5275         check_added_monitors!(nodes[0], 1);
5276         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
5277         connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
5278
5279         let htlc_timeout = {
5280                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5281                 assert_eq!(node_txn.len(), 1);
5282                 assert_eq!(node_txn[0].input.len(), 1);
5283                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5284                 check_spends!(node_txn[0], local_txn[0]);
5285                 node_txn[0].clone()
5286         };
5287
5288         mine_transaction(&nodes[0], &htlc_timeout);
5289         connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
5290         expect_payment_failed!(nodes[0], our_payment_hash, false);
5291
5292         // Verify that A is able to spend its own HTLC-Timeout tx thanks to the spendable output event given back by its ChannelMonitor
5293         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5294         assert_eq!(spend_txn.len(), 3);
5295         check_spends!(spend_txn[0], local_txn[0]);
5296         assert_eq!(spend_txn[1].input.len(), 1);
5297         check_spends!(spend_txn[1], htlc_timeout);
5298         assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
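	// As with HTLC-Success outputs, the HTLC-Timeout tx's output is CSV-locked for
	// to_self_delay blocks, which the spending input encodes in its nSequence.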
5299         assert_eq!(spend_txn[2].input.len(), 2);
5300         check_spends!(spend_txn[2], local_txn[0], htlc_timeout);
5301         assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
5302                 spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
5303 }
5304
5305 #[test]
5306 fn test_key_derivation_params() {
5307         // This test is a copy of test_dynamic_spendable_outputs_local_htlc_timeout_tx, with a key
5308         // manager rotation to test that `channel_keys_id` returned in
5309         // [`SpendableOutputDescriptor::DelayedPaymentOutput`] lets us re-derive the channel key set to
5310         // then derive a `delayed_payment_key`.
5311
5312         let chanmon_cfgs = create_chanmon_cfgs(3);
5313
5314         // We manually create the node configuration to back up the seed.
5315         let seed = [42; 32];
5316         let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
5317         let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
5318         let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger));
5319         let scorer = Mutex::new(test_utils::TestScorer::new());
5320         let router = test_utils::TestRouter::new(network_graph.clone(), &scorer);
5321         let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
5322         let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5323         node_cfgs.remove(0);
5324         node_cfgs.insert(0, node);
5325
5326         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5327         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5328
5329         // Create some initial channels
5330         // Create a dummy channel to advance the key derivation index by one and thus test
5331         // re-derivation correctness for node 0
5332         let chan_0 = create_announced_chan_between_nodes(&nodes, 0, 2);
5333         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5334         assert_ne!(chan_0.3.output[0].script_pubkey, chan_1.3.output[0].script_pubkey);
5335
5336         // Ensure all nodes are at the same height
5337         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
5338         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
5339         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
5340         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
5341
5342         let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9000000);
5343         let local_txn_0 = get_local_commitment_txn!(nodes[0], chan_0.2);
5344         let local_txn_1 = get_local_commitment_txn!(nodes[0], chan_1.2);
5345         assert_eq!(local_txn_1[0].input.len(), 1);
5346         check_spends!(local_txn_1[0], chan_1.3);
5347
5348         // Check that the funding pubkeys are unique
5349         let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][36..69]));
5350         let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][36..69]));
5351         if from_0_funding_key_0 == from_1_funding_key_0
5352             || from_0_funding_key_0 == from_1_funding_key_1
5353             || from_0_funding_key_1 == from_1_funding_key_0
5354             || from_0_funding_key_1 == from_1_funding_key_1 {
5355                 panic!("Funding pubkeys aren't unique");
5356         }
5357
5358         // Time out the HTLC on A's chain so that it can generate an HTLC-Timeout tx
5359         mine_transaction(&nodes[0], &local_txn_1[0]);
5360         connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
5361         check_closed_broadcast!(nodes[0], true);
5362         check_added_monitors!(nodes[0], 1);
5363         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
5364
5365         let htlc_timeout = {
5366                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5367                 assert_eq!(node_txn.len(), 1);
5368                 assert_eq!(node_txn[0].input.len(), 1);
5369                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5370                 check_spends!(node_txn[0], local_txn_1[0]);
5371                 node_txn[0].clone()
5372         };
5373
5374         mine_transaction(&nodes[0], &htlc_timeout);
5375         connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
5376         expect_payment_failed!(nodes[0], our_payment_hash, false);
5377
5378         // Verify that A is able to spend its own HTLC-Timeout tx thanks to the spendable output event given back by its ChannelMonitor
5379         let new_keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
5380         let spend_txn = check_spendable_outputs!(nodes[0], new_keys_manager);
5381         assert_eq!(spend_txn.len(), 3);
5382         check_spends!(spend_txn[0], local_txn_1[0]);
5383         assert_eq!(spend_txn[1].input.len(), 1);
5384         check_spends!(spend_txn[1], htlc_timeout);
5385         assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5386         assert_eq!(spend_txn[2].input.len(), 2);
5387         check_spends!(spend_txn[2], local_txn_1[0], htlc_timeout);
5388         assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
5389                 spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
5390 }
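
// Editorial sketch (not exercised by this test): a node restoring from seed in
// production would rebuild a real `KeysManager` and sweep the outputs surfaced
// via `Event::SpendableOutputs` along these lines, where `descriptors`,
// `destination_script`, the start times, and the feerate are assumed inputs:
//
//     use lightning::chain::keysinterface::KeysManager;
//     let keys_manager = KeysManager::new(&seed, starting_time_secs, starting_time_nanos);
//     let spend_tx = keys_manager.spend_spendable_outputs(
//         &descriptors, Vec::new(), destination_script,
//         feerate_sat_per_1000_weight, &Secp256k1::new())?;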
5391
5392 #[test]
5393 fn test_static_output_closing_tx() {
5394         let chanmon_cfgs = create_chanmon_cfgs(2);
5395         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5396         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5397         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5398
5399         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5400
5401         send_payment(&nodes[0], &[&nodes[1]], 8000000);
5402         let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
5403
5404         mine_transaction(&nodes[0], &closing_tx);
5405         check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
5406         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
5407
5408         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5409         assert_eq!(spend_txn.len(), 1);
5410         check_spends!(spend_txn[0], closing_tx);
5411
5412         mine_transaction(&nodes[1], &closing_tx);
5413         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
5414         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5415
5416         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5417         assert_eq!(spend_txn.len(), 1);
5418         check_spends!(spend_txn[0], closing_tx);
5419 }
5420
5421 fn do_htlc_claim_local_commitment_only(use_dust: bool) {
5422         let chanmon_cfgs = create_chanmon_cfgs(2);
5423         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5424         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5425         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5426         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5427
5428         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 });
5429
5430         // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being
5431         // present in B's local commitment transaction, but in none of A's commitment transactions.
5432         nodes[1].node.claim_funds(payment_preimage);
5433         check_added_monitors!(nodes[1], 1);
5434         expect_payment_claimed!(nodes[1], payment_hash, if use_dust { 50000 } else { 3_000_000 });
5435
5436         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5437         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
5438         expect_payment_sent_without_paths!(nodes[0], payment_preimage);
5439
5440         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
5441         check_added_monitors!(nodes[0], 1);
5442         let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5443         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
5444         check_added_monitors!(nodes[1], 1);
5445
5446         let starting_block = nodes[1].best_block_info();
5447         let mut block = Block {
5448                 header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
5449                 txdata: vec![],
5450         };
5451         for _ in starting_block.1 + 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + starting_block.1 + 2 {
5452                 connect_block(&nodes[1], &block);
5453                 block.header.prev_blockhash = block.block_hash();
5454         }
5455         test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
5456         check_closed_broadcast!(nodes[1], true);
5457         check_added_monitors!(nodes[1], 1);
5458         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
5459 }
5460
5461 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
5462         let chanmon_cfgs = create_chanmon_cfgs(2);
5463         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5464         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5465         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5466         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5467
5468         let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if use_dust { 50000 } else { 3000000 });
5469         nodes[0].node.send_payment_with_route(&route, payment_hash,
5470                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
5471         check_added_monitors!(nodes[0], 1);
5472
5473         let _as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5474
5475         // As far as A is concerned, the HTLC is now present only in the latest remote commitment
5476         // transaction; however, it is not in A's latest local commitment, so we can just broadcast
5477         // that to "time out" the HTLC.
5478
5479         let starting_block = nodes[1].best_block_info();
5480         let mut header = BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
5481
5482         for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 {
5483                 connect_block(&nodes[0], &Block { header, txdata: Vec::new()});
5484                 header.prev_blockhash = header.block_hash();
5485         }
5486         test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
5487         check_closed_broadcast!(nodes[0], true);
5488         check_added_monitors!(nodes[0], 1);
5489         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
5490 }
5491
5492 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
5493         let chanmon_cfgs = create_chanmon_cfgs(3);
5494         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5495         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5496         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5497         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5498
5499         // Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present
5500         // in B's previous (unrevoked) commitment transaction, but in none of A's commitment transactions.
5501         // Also optionally test that we *don't* fail the channel in case the commitment transaction was
5502         // actually revoked.
5503         let htlc_value = if use_dust { 50000 } else { 3000000 };
5504         let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
5505         nodes[1].node.fail_htlc_backwards(&our_payment_hash);
5506         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
5507         check_added_monitors!(nodes[1], 1);
5508
5509         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5510         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
5511         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
5512         check_added_monitors!(nodes[0], 1);
5513         let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5514         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
5515         check_added_monitors!(nodes[1], 1);
5516         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.1);
5517         check_added_monitors!(nodes[1], 1);
5518         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
5519
5520         if check_revoke_no_close {
5521                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
5522                 check_added_monitors!(nodes[0], 1);
5523         }
5524
5525         let starting_block = nodes[1].best_block_info();
5526         let mut block = Block {
5527                 header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
5528                 txdata: vec![],
5529         };
5530         for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 {
5531                 connect_block(&nodes[0], &block);
5532                 block.header.prev_blockhash = block.block_hash();
5533         }
5534         if !check_revoke_no_close {
5535                 test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
5536                 check_closed_broadcast!(nodes[0], true);
5537                 check_added_monitors!(nodes[0], 1);
5538                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
5539         } else {
5540                 expect_payment_failed!(nodes[0], our_payment_hash, true);
5541         }
5542 }
5543
5544 // Test that we close channels on-chain when broadcastable HTLCs reach their timeout window.
5545 // There are only a few cases to test here:
5546 //  * it's not really normative behavior, but we test that below-dust HTLCs "included" in
5547 //    broadcastable commitment transactions result in channel closure,
5548 //  * it's included in an unrevoked-but-previous remote commitment transaction,
5549 //  * it's included in the latest remote or local commitment transactions.
5550 // We test each of the three possible commitment transactions individually and use both dust and
5551 // non-dust HTLCs.
5552 // Note that we don't bother testing both outbound and inbound HTLC failures for each case, and we
5553 // assume they are handled the same across all six cases, as both outbound and inbound failures are
5554 // tested for at least one of the cases in other tests.
5555 #[test]
5556 fn htlc_claim_single_commitment_only_a() {
5557         do_htlc_claim_local_commitment_only(true);
5558         do_htlc_claim_local_commitment_only(false);
5559
5560         do_htlc_claim_current_remote_commitment_only(true);
5561         do_htlc_claim_current_remote_commitment_only(false);
5562 }
5563
5564 #[test]
5565 fn htlc_claim_single_commitment_only_b() {
5566         do_htlc_claim_previous_remote_commitment_only(true, false);
5567         do_htlc_claim_previous_remote_commitment_only(false, false);
5568         do_htlc_claim_previous_remote_commitment_only(true, true);
5569         do_htlc_claim_previous_remote_commitment_only(false, true);
5570 }
5571
5572 #[test]
5573 #[should_panic]
5574 fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be on its own as we are catching a panic
5575         let chanmon_cfgs = create_chanmon_cfgs(2);
5576         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5577         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5578         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5579         // Force duplicate randomness for every get-random call
5580         for node in nodes.iter() {
5581                 *node.keys_manager.override_random_bytes.lock().unwrap() = Some([0; 32]);
5582         }
5583
5584         // BOLT #2 spec: Sending node must ensure temporary_channel_id is unique from any other channel ID with the same peer.
5585         let channel_value_satoshis=10000;
5586         let push_msat=10001;
5587         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).unwrap();
5588         let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5589         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
5590         get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
5591
5592         // Create a second channel with the same random values. This used to panic due to a colliding
5593         // channel_id, but now panics due to a colliding outbound SCID alias.
5594         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err());
5595 }
5596
5597 #[test]
5598 fn bolt2_open_channel_sending_node_checks_part2() {
5599         let chanmon_cfgs = create_chanmon_cfgs(2);
5600         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5601         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5602         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5603
5604         // BOLT #2 spec: Sending node must set funding_satoshis to less than 2^24 satoshis.
             // Note: in Rust `2^24` is XOR (= 26), not 1 << 24, so in practice this ends up hitting the
             // 1000-satoshi minimum channel value check; either way create_channel must return an error.
5605         let channel_value_satoshis=2^24;
5606         let push_msat=10001;
5607         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err());
5608
5609         // BOLT #2 spec: Sending node must set push_msat to be equal to or less than 1000 * funding_satoshis
5610         let channel_value_satoshis=10000;
5611         // Test when push_msat is one more than 1000 * funding_satoshis, which must be rejected.
5612         let push_msat=1000*channel_value_satoshis+1;
5613         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err());
5614
5615         // BOLT #2 spec: Sending node must set channel_reserve_satoshis greater than or equal to dust_limit_satoshis
5616         let channel_value_satoshis=10000;
5617         let push_msat=10001;
5618         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_ok()); //Create a valid channel
5619         let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5620         assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.dust_limit_satoshis);
5621
5622         // BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0
5623         // Only the least-significant bit of channel_flags is currently defined, so channel_flags can only take one of two states: 0 or 1
5624         assert!(node0_to_1_send_open_channel.channel_flags<=1);
5625
5626         // BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver.
5627         assert!(BREAKDOWN_TIMEOUT>0);
5628         assert!(node0_to_1_send_open_channel.to_self_delay==BREAKDOWN_TIMEOUT);
5629
5630         // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within.
5631         let chain_hash=genesis_block(Network::Testnet).header.block_hash();
5632         assert_eq!(node0_to_1_send_open_channel.chain_hash,chain_hash);
5633
5634         // BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys.
5635         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.funding_pubkey.serialize()).is_ok());
5636         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.revocation_basepoint.serialize()).is_ok());
5637         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.htlc_basepoint.serialize()).is_ok());
5638         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.payment_point.serialize()).is_ok());
5639         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.delayed_payment_basepoint.serialize()).is_ok());
5640 }
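
// Worked numbers (editorial) for the push_msat bound tested above: with
// funding_satoshis = 10_000 the largest permissible push_msat is
// 1000 * 10_000 = 10_000_000 msat, so the 10_000_001 msat used above must be
// rejected, while any value at or below the bound passes this particular check.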
5641
5642 #[test]
5643 fn bolt2_open_channel_sane_dust_limit() {
5644         let chanmon_cfgs = create_chanmon_cfgs(2);
5645         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5646         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5647         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5648
5649         let channel_value_satoshis=1000000;
5650         let push_msat=10001;
5651         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).unwrap();
5652         let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5653         node0_to_1_send_open_channel.dust_limit_satoshis = 547;
5654         node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;
5655
5656         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
5657         let events = nodes[1].node.get_and_clear_pending_msg_events();
5658         let err_msg = match events[0] {
5659                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
5660                         msg.clone()
5661                 },
5662                 _ => panic!("Unexpected event"),
5663         };
5664         assert_eq!(err_msg.data, "dust_limit_satoshis (547) is greater than the implementation limit (546)");
5665 }
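
// Editorial context for the 546/547 boundary above: 546 sat is Bitcoin Core's
// dust threshold for P2PKH outputs at the default dust relay feerate. A
// counterparty dust_limit_satoshis above that could render otherwise-standard
// commitment outputs unrelayable, hence the implementation cap in the error
// message checked above.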
5666
5667 // Test that if we fail to send an HTLC that is being freed from the holding cell, and the HTLC
5668 // originated from our node, its failure is surfaced to the user. We trigger this failure by
5669 // increasing our fee while the HTLC sits in the holding cell, such that the HTLC is no longer
5670 // affordable once it is freed.
5671 #[test]
5672 fn test_fail_holding_cell_htlc_upon_free() {
5673         let chanmon_cfgs = create_chanmon_cfgs(2);
5674         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5675         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5676         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5677         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
5678
5679         // First nodes[0] generates an update_fee, setting the channel's
5680         // pending_update_fee.
5681         {
5682                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
5683                 *feerate_lock += 20;
5684         }
5685         nodes[0].node.timer_tick_occurred();
5686         check_added_monitors!(nodes[0], 1);
5687
5688         let events = nodes[0].node.get_and_clear_pending_msg_events();
5689         assert_eq!(events.len(), 1);
5690         let (update_msg, commitment_signed) = match events[0] {
5691                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
5692                         (update_fee.as_ref(), commitment_signed)
5693                 },
5694                 _ => panic!("Unexpected event"),
5695         };
5696
5697         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
5698
5699         let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5700         let channel_reserve = chan_stat.channel_reserve_msat;
5701         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
5702         let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2);
5703
5704         // The 2* and +1 in the commit tx fee calculation are for the fee spike reserve (see the sketch after this function).
5705         let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors);
5706         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
5707
5708         // Send a payment which passes reserve checks but gets stuck in the holding cell.
5709         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
5710                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
5711         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5712         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
5713
5714         // Flush the pending fee update.
5715         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
5716         let (as_revoke_and_ack, _) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5717         check_added_monitors!(nodes[1], 1);
5718         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
5719         check_added_monitors!(nodes[0], 1);
5720
5721         // Upon receipt of the RAA, there will be an attempt to resend the holding cell
5722         // HTLC, but now that the fee has been raised the payment will fail, causing
5723         // us to surface its failure to the user.
5724         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5725         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
5726         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", hex::encode(chan.2)), 1);
5727         let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}",
5728                 hex::encode(our_payment_hash.0), chan_stat.channel_reserve_msat, hex::encode(chan.2));
5729         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1);
5730
5731         // Check that the payment failed to be sent out.
5732         let events = nodes[0].node.get_and_clear_pending_events();
5733         assert_eq!(events.len(), 2);
5734         match &events[0] {
5735                 &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
5736                         assert_eq!(PaymentId(our_payment_hash.0), *payment_id.as_ref().unwrap());
5737                         assert_eq!(our_payment_hash.clone(), *payment_hash);
5738                         assert_eq!(*payment_failed_permanently, false);
5739                         assert_eq!(*short_channel_id, Some(route.paths[0][0].short_channel_id));
5740                 },
5741                 _ => panic!("Unexpected event"),
5742         }
5743         match &events[1] {
5744                 &Event::PaymentFailed { ref payment_hash, .. } => {
5745                         assert_eq!(our_payment_hash.clone(), *payment_hash);
5746                 },
5747                 _ => panic!("Unexpected event"),
5748         }
5749 }
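
// Editorial sketch of how the `commit_tx_fee_msat` helper used above computes
// the fee term in `max_can_send`, assuming commitment weight is a base weight
// plus a fixed per-HTLC weight (the constants imported at the top of this file):
#[allow(dead_code)]
fn example_commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: u64, opt_anchors: bool) -> u64 {
        let tx_weight = commitment_tx_base_weight(opt_anchors) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC;
        // Round down to whole satoshis at the given feerate, then express in msat.
        feerate_per_kw as u64 * tx_weight / 1000 * 1000
}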
5750
5751 // Test that if multiple HTLCs are released from the holding cell and one is
5752 // valid but the other is no longer valid upon release, the valid HTLC can be
5753 // successfully completed while the other one fails as expected.
5754 #[test]
5755 fn test_free_and_fail_holding_cell_htlcs() {
5756         let chanmon_cfgs = create_chanmon_cfgs(2);
5757         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5758         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5759         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5760         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
5761
5762         // First nodes[0] generates an update_fee, setting the channel's
5763         // pending_update_fee.
5764         {
5765                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
5766                 *feerate_lock += 200;
5767         }
5768         nodes[0].node.timer_tick_occurred();
5769         check_added_monitors!(nodes[0], 1);
5770
5771         let events = nodes[0].node.get_and_clear_pending_msg_events();
5772         assert_eq!(events.len(), 1);
5773         let (update_msg, commitment_signed) = match events[0] {
5774                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
5775                         (update_fee.as_ref(), commitment_signed)
5776                 },
5777                 _ => panic!("Unexpected event"),
5778         };
5779
5780         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
5781
5782         let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5783         let channel_reserve = chan_stat.channel_reserve_msat;
5784         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
5785         let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2);
5786
5787         // The 2* and +1 in the commit tx fee calculation are for the fee spike reserve.
5788         let amt_1 = 20000;
5789         let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors) - amt_1;
5790         let (route_1, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_1);
5791         let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_2);
5792
5793         // Send 2 payments which pass reserve checks but get stuck in the holding cell.
5794         nodes[0].node.send_payment_with_route(&route_1, payment_hash_1,
5795                 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
5796         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5797         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1);
5798         let payment_id_2 = PaymentId(nodes[0].keys_manager.get_secure_random_bytes());
5799         nodes[0].node.send_payment_with_route(&route_2, payment_hash_2,
5800                 RecipientOnionFields::secret_only(payment_secret_2), payment_id_2).unwrap();
5801         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5802         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2);
5803
5804         // Flush the pending fee update.
5805         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
5806         let (revoke_and_ack, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5807         check_added_monitors!(nodes[1], 1);
5808         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_and_ack);
5809         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
5810         check_added_monitors!(nodes[0], 2);
5811
5812         // Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs,
5813         // but now that the fee has been raised the second payment will fail, causing us
5814         // to surface its failure to the user. The first payment should succeed.
5815         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5816         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
5817         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", hex::encode(chan.2)), 1);
5818         let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}",
5819                 hex::encode(payment_hash_2.0), chan_stat.channel_reserve_msat, hex::encode(chan.2));
5820         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1);
5821
5822         // Check that the second payment failed to be sent out.
5823         let events = nodes[0].node.get_and_clear_pending_events();
5824         assert_eq!(events.len(), 2);
5825         match &events[0] {
5826                 &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
5827                         assert_eq!(payment_id_2, *payment_id.as_ref().unwrap());
5828                         assert_eq!(payment_hash_2.clone(), *payment_hash);
5829                         assert_eq!(*payment_failed_permanently, false);
5830                         assert_eq!(*short_channel_id, Some(route_2.paths[0][0].short_channel_id));
5831                 },
5832                 _ => panic!("Unexpected event"),
5833         }
5834         match &events[1] {
5835                 &Event::PaymentFailed { ref payment_hash, .. } => {
5836                         assert_eq!(payment_hash_2.clone(), *payment_hash);
5837                 },
5838                 _ => panic!("Unexpected event"),
5839         }
5840
5841         // Complete the first payment and the RAA from the fee update.
5842         let (payment_event, send_raa_event) = {
5843                 let mut msgs = nodes[0].node.get_and_clear_pending_msg_events();
5844                 assert_eq!(msgs.len(), 2);
5845                 (SendEvent::from_event(msgs.remove(0)), msgs.remove(0))
5846         };
5847         let raa = match send_raa_event {
5848                 MessageSendEvent::SendRevokeAndACK { msg, .. } => msg,
5849                 _ => panic!("Unexpected event"),
5850         };
5851         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
5852         check_added_monitors!(nodes[1], 1);
5853         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
5854         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
5855         let events = nodes[1].node.get_and_clear_pending_events();
5856         assert_eq!(events.len(), 1);
5857         match events[0] {
5858                 Event::PendingHTLCsForwardable { .. } => {},
5859                 _ => panic!("Unexpected event"),
5860         }
5861         nodes[1].node.process_pending_htlc_forwards();
5862         let events = nodes[1].node.get_and_clear_pending_events();
5863         assert_eq!(events.len(), 1);
5864         match events[0] {
5865                 Event::PaymentClaimable { .. } => {},
5866                 _ => panic!("Unexpected event"),
5867         }
5868         nodes[1].node.claim_funds(payment_preimage_1);
5869         check_added_monitors!(nodes[1], 1);
5870         expect_payment_claimed!(nodes[1], payment_hash_1, amt_1);
5871
5872         let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5873         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]);
5874         commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true);
5875         expect_payment_sent!(nodes[0], payment_preimage_1);
5876 }
5877
5878 // Test that if we fail to forward an HTLC that is being freed from the holding cell, the
5879 // HTLC is failed backwards. We trigger this forwarding failure by increasing our fee while
5880 // the HTLC is in the holding cell, such that the HTLC is no longer affordable once it is
5881 // freed.
5882 #[test]
5883 fn test_fail_holding_cell_htlc_upon_free_multihop() {
5884         let chanmon_cfgs = create_chanmon_cfgs(3);
5885         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5886         // When this test was written, the default base fee floated based on the HTLC count.
5887         // It is now fixed, so we simply set the fee to the expected value here.
5888         let mut config = test_default_channel_config();
5889         config.channel_config.forwarding_fee_base_msat = 196;
5890         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
5891         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5892         let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
5893         let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);
5894
5895         // First nodes[1] generates an update_fee, setting the channel's
5896         // pending_update_fee.
5897         {
5898                 let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
5899                 *feerate_lock += 20;
5900         }
5901         nodes[1].node.timer_tick_occurred();
5902         check_added_monitors!(nodes[1], 1);
5903
5904         let events = nodes[1].node.get_and_clear_pending_msg_events();
5905         assert_eq!(events.len(), 1);
5906         let (update_msg, commitment_signed) = match events[0] {
5907                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
5908                         (update_fee.as_ref(), commitment_signed)
5909                 },
5910                 _ => panic!("Unexpected event"),
5911         };
5912
5913         nodes[2].node.handle_update_fee(&nodes[1].node.get_our_node_id(), update_msg.unwrap());
5914
5915         let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan_0_1.2);
5916         let channel_reserve = chan_stat.channel_reserve_msat;
5917         let feerate = get_feerate!(nodes[0], nodes[1], chan_0_1.2);
5918         let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan_0_1.2);
5919
5920         // Send a payment which passes reserve checks but gets stuck in the holding cell.
5921         let feemsat = 239;
5922         let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
5923         let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors) - total_routing_fee_msat;
5924         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send);
5925         let payment_event = {
5926                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
5927                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
5928                 check_added_monitors!(nodes[0], 1);
5929
5930                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
5931                 assert_eq!(events.len(), 1);
5932
5933                 SendEvent::from_event(events.remove(0))
5934         };
5935         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
5936         check_added_monitors!(nodes[1], 0);
5937         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
5938         expect_pending_htlcs_forwardable!(nodes[1]);
5939
5940         chan_stat = get_channel_value_stat!(nodes[1], nodes[2], chan_1_2.2);
5941         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
5942
5943         // Flush the pending fee update.
5944         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
5945         let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
5946         check_added_monitors!(nodes[2], 1);
5947         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &raa);
5948         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &commitment_signed);
5949         check_added_monitors!(nodes[1], 2);
5950
5951         // A final RAA message is generated to finalize the fee update.
5952         let events = nodes[1].node.get_and_clear_pending_msg_events();
5953         assert_eq!(events.len(), 1);
5954
5955         let raa_msg = match &events[0] {
5956                 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => {
5957                         msg.clone()
5958                 },
5959                 _ => panic!("Unexpected event"),
5960         };
5961
5962         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_msg);
5963         check_added_monitors!(nodes[2], 1);
5964         assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
5965
5966         // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
5967         let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
5968         assert_eq!(process_htlc_forwards_event.len(), 2);
5969         match &process_htlc_forwards_event[0] {
5970                 &Event::PendingHTLCsForwardable { .. } => {},
5971                 _ => panic!("Unexpected event"),
5972         }
5973
5974         // In response, we call ChannelManager's process_pending_htlc_forwards
5975         nodes[1].node.process_pending_htlc_forwards();
5976         check_added_monitors!(nodes[1], 1);
5977
5978         // This causes the HTLC to be failed backwards.
5979         let fail_event = nodes[1].node.get_and_clear_pending_msg_events();
5980         assert_eq!(fail_event.len(), 1);
5981         let (fail_msg, commitment_signed) = match &fail_event[0] {
5982                 &MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
5983                         assert_eq!(updates.update_add_htlcs.len(), 0);
5984                         assert_eq!(updates.update_fulfill_htlcs.len(), 0);
5985                         assert_eq!(updates.update_fail_malformed_htlcs.len(), 0);
5986                         assert_eq!(updates.update_fail_htlcs.len(), 1);
5987                         (updates.update_fail_htlcs[0].clone(), updates.commitment_signed.clone())
5988                 },
5989                 _ => panic!("Unexpected event"),
5990         };
5991
5992         // Pass the failure messages back to nodes[0].
5993         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
5994         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
5995
5996         // Complete the HTLC failure+removal process.
5997         let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5998         check_added_monitors!(nodes[0], 1);
5999         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
6000         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
6001         check_added_monitors!(nodes[1], 2);
6002         let final_raa_event = nodes[1].node.get_and_clear_pending_msg_events();
6003         assert_eq!(final_raa_event.len(), 1);
6004         let raa = match &final_raa_event[0] {
6005                 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => msg.clone(),
6006                 _ => panic!("Unexpected event"),
6007         };
6008         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
6009         expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_1_2.0.contents.short_channel_id, false);
6010         check_added_monitors!(nodes[0], 1);
6011 }
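
// Editorial sketch of the per-hop forwarding fee from BOLT #7 that the
// `feemsat` constant above stands in for; the parameter names are illustrative:
#[allow(dead_code)]
fn example_forwarding_fee_msat(fee_base_msat: u64, fee_proportional_millionths: u64, amt_to_forward_msat: u64) -> u64 {
        fee_base_msat + amt_to_forward_msat * fee_proportional_millionths / 1_000_000
}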
6012
6013 // BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message.
6014 // BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve.
6015 // TODO: We don't believe this is explicitly enforced when sending an HTLC, but as the fee aspects of the BOLT specs are in flux, we leave this as a TODO.
6016
6017 #[test]
6018 fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
6019         //BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these)
6020         let chanmon_cfgs = create_chanmon_cfgs(2);
6021         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6022         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6023         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6024         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6025
6026         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6027         route.paths[0][0].fee_msat = 100;
6028
6029         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6030                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6031                 ), true, APIError::ChannelUnavailable { ref err },
6032                 assert!(regex::Regex::new(r"Cannot send less than their minimum HTLC value \(\d+\)").unwrap().is_match(err)));
6033         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6034         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send less than their minimum HTLC value", 1);
6035 }
6036
6037 #[test]
6038 fn test_update_add_htlc_bolt2_sender_zero_value_msat() {
6039         //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6040         let chanmon_cfgs = create_chanmon_cfgs(2);
6041         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6042         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6043         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6044         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6045
6046         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6047         route.paths[0][0].fee_msat = 0;
6048         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6049                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)),
6050                 true, APIError::ChannelUnavailable { ref err },
6051                 assert_eq!(err, "Cannot send 0-msat HTLC"));
6052
6053         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6054         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 1);
6055 }
6056
6057 #[test]
6058 fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
6059         //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6060         let chanmon_cfgs = create_chanmon_cfgs(2);
6061         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6062         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6063         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6064         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6065
6066         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6067         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6068                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6069         check_added_monitors!(nodes[0], 1);
6070         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6071         updates.update_add_htlcs[0].amount_msat = 0;
6072
6073         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6074         nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
6075         check_closed_broadcast!(nodes[1], true).unwrap();
6076         check_added_monitors!(nodes[1], 1);
6077         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() });
6078 }
6079
6080 #[test]
6081 fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
6082         //BOLT 2 Requirement: MUST set cltv_expiry less than 500000000.
6083         //It is enforced when constructing a route.
6084         let chanmon_cfgs = create_chanmon_cfgs(2);
6085         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6086         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6087         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6088         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6089
6090         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0)
6091                 .with_features(nodes[1].node.invoice_features());
6092         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000, 0);
6093         route.paths[0].last_mut().unwrap().cltv_expiry_delta = 500000001;
6094         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6095                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6096                 ), true, APIError::InvalidRoute { ref err },
6097                 assert_eq!(err, &"Channel CLTV overflowed?"));
6098 }
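
// Background (editorial): Bitcoin consensus treats nLockTime values below
// 500_000_000 as block heights and values at or above it as unix timestamps,
// so cltv_expiry must stay strictly below that cutoff for height-based expiry
// to be meaningful; the 500_000_001 delta set above crosses it, so the send
// must fail.
#[allow(dead_code)]
const LOCKTIME_THRESHOLD_EXAMPLE: u32 = 500_000_000; // mirrors Bitcoin's locktime threshold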
6099
6100 #[test]
6101 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() {
6102         //BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC.
6103         //BOLT 2 Requirement: for the first HTLC it offers MUST set id to 0.
6104         //BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer.
6105         let chanmon_cfgs = create_chanmon_cfgs(2);
6106         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6107         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6108         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6109         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6110         let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
6111                 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().counterparty_max_accepted_htlcs as u64;
6112
6113         for i in 0..max_accepted_htlcs {
6114                 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6115                 let payment_event = {
6116                         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6117                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6118                         check_added_monitors!(nodes[0], 1);
6119
6120                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6121                         assert_eq!(events.len(), 1);
6122                         if let MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. }, } = events[0] {
6123                                 assert_eq!(htlcs[0].htlc_id, i);
6124                         } else {
6125                                 panic!("Unexpected event");
6126                         }
6127                         SendEvent::from_event(events.remove(0))
6128                 };
6129                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6130                 check_added_monitors!(nodes[1], 0);
6131                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6132
6133                 expect_pending_htlcs_forwardable!(nodes[1]);
6134                 expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000);
6135         }
6136         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6137         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6138                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6139                 ), true, APIError::ChannelUnavailable { ref err },
6140                 assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err)));
6141
6142         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6143         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot push more than their max accepted HTLCs", 1);
6144 }
6145
6146 #[test]
6147 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
6148         //BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC.
6149         let chanmon_cfgs = create_chanmon_cfgs(2);
6150         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6151         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6152         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6153         let channel_value = 100000;
6154         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0);
6155         let max_in_flight = get_channel_value_stat!(nodes[0], nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat;
6156
6157         send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
6158
6159         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_in_flight);
6160         // Manually create a route over our max in flight (which our router normally automatically
6161         // limits us to).
6162         route.paths[0][0].fee_msat = max_in_flight + 1;
6163         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6164                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6165                 ), true, APIError::ChannelUnavailable { ref err },
6166                 assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
6167
6168         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6169         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put us over the max HTLC value in flight our peer will accept", 1);
6170
6171         send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
6172 }
6173
6174 // BOLT 2 Requirements for the Receiver when handling an update_add_htlc message.
6175 #[test]
6176 fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
6177         //BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel.
6178         let chanmon_cfgs = create_chanmon_cfgs(2);
6179         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6180         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6181         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6182         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6183         let htlc_minimum_msat: u64;
	{
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
		htlc_minimum_msat = channel.get_holder_htlc_minimum_msat();
	}

	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
	nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);
	let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat - 1;
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
	assert!(nodes[1].node.list_channels().is_empty());
	let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
	assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
}

#[test]
fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
	//BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);

	let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
	let channel_reserve = chan_stat.channel_reserve_msat;
	let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
	let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2);
	// The 2* and +1 are for the fee spike reserve.
	let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, opt_anchors);
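	// As a rough sketch (assuming the test helper's usual definition), commit_tx_fee_msat is
	// (commitment_tx_base_weight(opt_anchors) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC)
	// * feerate / 1000: the "1 + 1" budgets for this HTLC plus one more, and the 2x mirrors
	// FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE so the sender can absorb a feerate doubling.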

	let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound;
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
	nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);
	let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	// Even though channel-initiator senders are required to respect the fee_spike_reserve,
	// at this time channel-initiatee receivers are not required to enforce that senders
	// respect the fee_spike_reserve.
	updates.update_add_htlcs[0].amount_msat = max_can_send + commit_tx_fee_outbound + 1;
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);

	assert!(nodes[1].node.list_channels().is_empty());
	let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
	assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
}

#[test]
fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
	//BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel
	//BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);

	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 3999999);
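	// Construct the onion by hand so the same HTLC can be replayed with fresh htlc_ids below:
	// derive the per-hop shared secrets from a fixed session key, build one payload per hop,
	// then wrap everything into the final packet ([0; 32] here is the PRNG seed used to
	// generate the onion's filler bytes).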
	let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
	let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
	let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap();
	let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 3999999, &Some(our_payment_secret), cur_height, &None).unwrap();
	let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash);

	let mut msg = msgs::UpdateAddHTLC {
		channel_id: chan.2,
		htlc_id: 0,
		amount_msat: 1000,
		payment_hash: our_payment_hash,
		cltv_expiry: htlc_cltv,
		onion_routing_packet: onion_packet.clone(),
	};

	for i in 0..super::channel::OUR_MAX_HTLCS {
		msg.htlc_id = i as u64;
		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
	}
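	// None of these HTLCs have been committed (we never send a commitment_signed), but the
	// receiver counts pending adds against max_accepted_htlcs, so the next one is over limit.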
	msg.htlc_id = (super::channel::OUR_MAX_HTLCS) as u64;
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);

	assert!(nodes[1].node.list_channels().is_empty());
	let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
	assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
}

#[test]
fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
	//BOLT 2 Requirement: if a sending node adds more than its max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction: SHOULD fail the channel
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);

	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);
	let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat + 1;
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);

	assert!(nodes[1].node.list_channels().is_empty());
	let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
	assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
}

#[test]
fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
	//BOLT2 Requirement: if sending node sets cltv_expiry to greater or equal to 500000000: SHOULD fail the channel.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);
	let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	updates.update_add_htlcs[0].cltv_expiry = 500000000;
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);

	assert!(nodes[1].node.list_channels().is_empty());
	let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
	assert_eq!(err_msg.data, "Remote provided CLTV expiry in seconds instead of block height");
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
}

#[test]
fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
	//BOLT 2 Requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection.
	// We test this by checking that repeated HTLCs pass commitment signature checks after a
	// disconnect, and that non-sequential htlc_ids result in a channel failure.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	create_announced_chan_between_nodes(&nodes, 0, 1);
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);
	let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);

	//Disconnect and Reconnect
	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
	assert_eq!(reestablish_1.len(), 1);
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
	assert_eq!(reestablish_2.len(), 1);
	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
	handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
	handle_chan_reestablish_msgs!(nodes[1], nodes[0]);

	//Resend HTLC
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
	assert_eq!(updates.commitment_signed.htlc_signatures.len(), 1);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());

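	// A third delivery of the same update_add_htlc is no longer a benign retransmission: B has
	// now seen a commitment covering htlc_id 0, so id 0 doesn't match the next expected HTLC
	// ID and B must fail the channel.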
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);

	assert!(nodes[1].node.list_channels().is_empty());
	let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
	assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
}

#[test]
fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
	//BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
	let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();

	check_added_monitors!(nodes[0], 1);
	let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);

	let update_msg = msgs::UpdateFulfillHTLC{
		channel_id: chan.2,
		htlc_id: 0,
		payment_preimage: our_payment_preimage,
	};

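	// Deliver the hand-crafted fulfill before any commitment_signed dance has run. The HTLC is
	// not irrevocably committed on either side, so A must treat this as a protocol violation.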
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msg);

	assert!(nodes[0].node.list_channels().is_empty());
	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
	assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
}

#[test]
fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
	//BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);
	let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);

	let update_msg = msgs::UpdateFailHTLC{
		channel_id: chan.2,
		htlc_id: 0,
		reason: msgs::OnionErrorPacket { data: Vec::new()},
	};

	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_msg);

	assert!(nodes[0].node.list_channels().is_empty());
	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
	assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
}

#[test]
fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() {
	//BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);
	let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
	let update_msg = msgs::UpdateFailMalformedHTLC{
		channel_id: chan.2,
		htlc_id: 0,
		sha256_of_onion: [1; 32],
		failure_code: 0x8000,
	};

	nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);

	assert!(nodes[0].node.list_channels().is_empty());
	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
	assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
}

#[test]
fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
	//BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);

	nodes[1].node.claim_funds(our_payment_preimage);
	check_added_monitors!(nodes[1], 1);
	expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);

	let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
		match events[0] {
			MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
				assert!(update_add_htlcs.is_empty());
				assert_eq!(update_fulfill_htlcs.len(), 1);
				assert!(update_fail_htlcs.is_empty());
				assert!(update_fail_malformed_htlcs.is_empty());
				assert!(update_fee.is_none());
				update_fulfill_htlcs[0].clone()
			},
			_ => panic!("Unexpected event"),
		}
	};

	update_fulfill_msg.htlc_id = 1;
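	// Only htlc_id 0 exists on this channel, so pointing the fulfill at id 1 references an
	// HTLC A never offered and must fail the channel.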

	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);

	assert!(nodes[0].node.list_channels().is_empty());
	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
	assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
}

#[test]
fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
	//BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);

	nodes[1].node.claim_funds(our_payment_preimage);
	check_added_monitors!(nodes[1], 1);
	expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);

	let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
		match events[0] {
			MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
				assert!(update_add_htlcs.is_empty());
				assert_eq!(update_fulfill_htlcs.len(), 1);
				assert!(update_fail_htlcs.is_empty());
				assert!(update_fail_malformed_htlcs.is_empty());
				assert!(update_fee.is_none());
				update_fulfill_htlcs[0].clone()
			},
			_ => panic!("Unexpected event"),
		}
	};

	update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]);
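	// Swap in a bogus preimage: SHA256 of [1; 32] won't match the HTLC's payment_hash, so A
	// must fail the channel rather than accept the claim.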

	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);

	assert!(nodes[0].node.list_channels().is_empty());
	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
	assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
}

#[test]
fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() {
	//BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);

	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);

	let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
	check_added_monitors!(nodes[1], 0);
	commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true);

	let events = nodes[1].node.get_and_clear_pending_msg_events();

	let mut update_msg: msgs::UpdateFailMalformedHTLC = {
		match events[0] {
			MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
				assert!(update_add_htlcs.is_empty());
				assert!(update_fulfill_htlcs.is_empty());
				assert!(update_fail_htlcs.is_empty());
				assert_eq!(update_fail_malformed_htlcs.len(), 1);
				assert!(update_fee.is_none());
				update_fail_malformed_htlcs[0].clone()
			},
			_ => panic!("Unexpected event"),
		}
	};
	update_msg.failure_code &= !0x8000;
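	// 0x8000 is the BADONION bit, which BOLT 4 requires on every update_fail_malformed_htlc
	// failure code; stripping it from an otherwise-valid code makes the message invalid.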
	nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);

	assert!(nodes[0].node.list_channels().is_empty());
	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
	assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
}

#[test]
fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_update_fail_htlc() {
	//BOLT 2 Requirement: a receiving node which has an outgoing HTLC canceled by update_fail_malformed_htlc:
	//    * MUST return an error in the update_fail_htlc sent to the link which originally sent the HTLC, using the failure_code given and setting the data to sha256_of_onion.

	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
	let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000);

	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);

	//First hop
	let mut payment_event = {
		nodes[0].node.send_payment_with_route(&route, our_payment_hash,
			RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		SendEvent::from_event(events.remove(0))
	};
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	check_added_monitors!(nodes[1], 0);
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
	expect_pending_htlcs_forwardable!(nodes[1]);
	let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events_2.len(), 1);
	check_added_monitors!(nodes[1], 1);
	payment_event = SendEvent::from_event(events_2.remove(0));
	assert_eq!(payment_event.msgs.len(), 1);

	//Second Hop
	payment_event.msgs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
	check_added_monitors!(nodes[2], 0);
	commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);

	let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
	assert_eq!(events_3.len(), 1);
	let update_msg: (msgs::UpdateFailMalformedHTLC, msgs::CommitmentSigned) = {
		match events_3[0] {
			MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
				assert!(update_add_htlcs.is_empty());
				assert!(update_fulfill_htlcs.is_empty());
				assert!(update_fail_htlcs.is_empty());
				assert_eq!(update_fail_malformed_htlcs.len(), 1);
				assert!(update_fee.is_none());
				(update_fail_malformed_htlcs[0].clone(), commitment_signed.clone())
			},
			_ => panic!("Unexpected event"),
		}
	};

	nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg.0);

	check_added_monitors!(nodes[1], 0);
	commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
	let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events_4.len(), 1);

	//Confirm that handling the update_fail_malformed_htlc message produces an update_fail_htlc message to be forwarded back along the route
	match events_4[0] {
		MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
			assert!(update_add_htlcs.is_empty());
			assert!(update_fulfill_htlcs.is_empty());
			assert_eq!(update_fail_htlcs.len(), 1);
			assert!(update_fail_malformed_htlcs.is_empty());
			assert!(update_fee.is_none());
		},
		_ => panic!("Unexpected event"),
	};

	check_added_monitors!(nodes[1], 1);
}

#[test]
fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() {
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);

	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100_000);

	// First hop
	let mut payment_event = {
		nodes[0].node.send_payment_with_route(&route, our_payment_hash,
			RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
		SendEvent::from_node(&nodes[0])
	};

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
	expect_pending_htlcs_forwardable!(nodes[1]);
	check_added_monitors!(nodes[1], 1);
	payment_event = SendEvent::from_node(&nodes[1]);
	assert_eq!(payment_event.msgs.len(), 1);

	// Second Hop
	payment_event.msgs[0].onion_routing_packet.version = 1; // Trigger an invalid_onion_version error
	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
	check_added_monitors!(nodes[2], 0);
	commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);

	let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
	assert_eq!(events_3.len(), 1);
	match events_3[0] {
		MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
			let mut update_msg = updates.update_fail_malformed_htlcs[0].clone();
			// Set the NODE bit (BADONION and PERM already set in invalid_onion_version error)
			update_msg.failure_code |= 0x2000;

			nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg);
			commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true);
		},
		_ => panic!("Unexpected event"),
	}

	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
		vec![HTLCDestination::NextHopChannel {
			node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
	let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events_4.len(), 1);
	check_added_monitors!(nodes[1], 1);

	match events_4[0] {
		MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
			nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
			commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
		},
		_ => panic!("Unexpected event"),
	}

	let events_5 = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events_5.len(), 2);

	// Expect a PaymentPathFailed event with a ChannelFailure network update for the channel
	// between the node originating the error and its next hop.
	match events_5[0] {
		Event::PaymentPathFailed { error_code, failure: PathFailure::OnPath { network_update: Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent }) }, ..
		} => {
			assert_eq!(short_channel_id, chan_2.0.contents.short_channel_id);
			assert!(is_permanent);
			assert_eq!(error_code, Some(0x8000|0x4000|0x2000|4));
		},
		_ => panic!("Unexpected event"),
	}
	match events_5[1] {
		Event::PaymentFailed { payment_hash, .. } => {
			assert_eq!(payment_hash, our_payment_hash);
		},
		_ => panic!("Unexpected event"),
	}

	// TODO: Test actual removal of channel from NetworkGraph when it's implemented.
}

fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
	// Dust-HTLC failure updates must be delayed until the failure-trigger tx (in this case the
	// local commitment) reaches ANTI_REORG_DELAY.
	// We can have at most two valid local commitment txs, so both cases must be covered, and
	// both txs must be checked to get all the failures: an HTLC may have been removed from the
	// latest local commitment tx while that tx remains valid until we receive the remote's RAA.

	let mut chanmon_cfgs = create_chanmon_cfgs(2);
	chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
		.unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis;
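	// Dust HTLCs get no output on the commitment transaction and so cannot be claimed or timed
	// out on-chain; their failure is keyed off confirmation of the commitment tx itself, which
	// is exactly the delay being exercised here.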

	// We route 2 dust-HTLCs between A and B
	let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
	let (_, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
	route_payment(&nodes[0], &[&nodes[1]], 1000000);

	// Cache one local commitment tx as previous
	let as_prev_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);

	// Fail one HTLC to prune it in the will-be-latest-local commitment tx
	nodes[1].node.fail_htlc_backwards(&payment_hash_2);
	check_added_monitors!(nodes[1], 0);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
	check_added_monitors!(nodes[1], 1);

	let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &remove.update_fail_htlcs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &remove.commitment_signed);
	check_added_monitors!(nodes[0], 1);

	// Cache one local commitment tx as latest
	let as_last_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);

	let events = nodes[0].node.get_and_clear_pending_msg_events();
	match events[0] {
		MessageSendEvent::SendRevokeAndACK { node_id, .. } => {
			assert_eq!(node_id, nodes[1].node.get_our_node_id());
		},
		_ => panic!("Unexpected event"),
	}
	match events[1] {
		MessageSendEvent::UpdateHTLCs { node_id, .. } => {
			assert_eq!(node_id, nodes[1].node.get_our_node_id());
		},
		_ => panic!("Unexpected event"),
	}

	assert_ne!(as_prev_commitment_tx, as_last_commitment_tx);
	// Fail the 2 dust-HTLCs, moving their failure into the maturation buffer (htlc_updated_waiting_threshold_conf)
	if announce_latest {
		mine_transaction(&nodes[0], &as_last_commitment_tx[0]);
	} else {
		mine_transaction(&nodes[0], &as_prev_commitment_tx[0]);
	}

	check_closed_broadcast!(nodes[0], true);
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);

	assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
	connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
	let events = nodes[0].node.get_and_clear_pending_events();
	// Only the 2 dust-HTLCs should generate PaymentPathFailed events here (each paired with a
	// PaymentFailed event, hence 4 events total); the over-dust HTLC has to be failed by the
	// timeout tx.
	assert_eq!(events.len(), 4);
	let mut first_failed = false;
	for event in events {
		match event {
			Event::PaymentPathFailed { payment_hash, .. } => {
				if payment_hash == payment_hash_1 {
					assert!(!first_failed);
					first_failed = true;
				} else {
					assert_eq!(payment_hash, payment_hash_2);
				}
			},
			Event::PaymentFailed { .. } => {}
			_ => panic!("Unexpected event"),
		}
	}
}

#[test]
fn test_failure_delay_dust_htlc_local_commitment() {
	do_test_failure_delay_dust_htlc_local_commitment(true);
	do_test_failure_delay_dust_htlc_local_commitment(false);
}

fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
	// Outbound HTLC-failure updates must be cancelled if we get a reorg before we reach ANTI_REORG_DELAY.
	// Broadcast of a revoked remote commitment tx: triggers failure-update of dust/non-dust HTLCs
	// Broadcast of the remote commitment tx: triggers failure-update of dust-HTLCs
	// Broadcast of a timeout tx on the remote commitment tx: triggers failure-update of non-dust HTLCs
	// Broadcast of the local commitment tx: triggers failure-update of dust-HTLCs
	// Broadcast of an HTLC-timeout tx on the local commitment tx: triggers failure-update of non-dust HTLCs

	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
		.unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis;

	let (_payment_preimage_1, dust_hash, _payment_secret_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
	let (_payment_preimage_2, non_dust_hash, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1]], 1000000);

	let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
	let bs_commitment_tx = get_local_commitment_txn!(nodes[1], chan.2);

	// We revoked bs_commitment_tx
	if revoked {
		let (payment_preimage_3, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
		claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
	}

	let mut timeout_tx = Vec::new();
	if local {
		// We fail dust-HTLC 1 by broadcast of the local commitment tx
		mine_transaction(&nodes[0], &as_commitment_tx[0]);
		check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
		connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
		expect_payment_failed!(nodes[0], dust_hash, false);

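		// Advance to roughly the HTLC's expiry plus the grace period LDK waits before going
		// on-chain; the ANTI_REORG_DELAY - 1 blocks connected above already count toward it.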
6897                 connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - ANTI_REORG_DELAY);
6898                 check_closed_broadcast!(nodes[0], true);
6899                 check_added_monitors!(nodes[0], 1);
6900                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
6901                 timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
6902                 assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
6903                 // We fail non-dust-HTLC 2 by broadcast of local HTLC-timeout tx on local commitment tx
6904                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
6905                 mine_transaction(&nodes[0], &timeout_tx[0]);
6906                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
6907                 expect_payment_failed!(nodes[0], non_dust_hash, false);
6908         } else {
6909                 // We fail dust-HTLC 1 by broadcast of remote commitment tx. If revoked, fail also non-dust HTLC
6910                 mine_transaction(&nodes[0], &bs_commitment_tx[0]);
6911                 check_closed_broadcast!(nodes[0], true);
6912                 check_added_monitors!(nodes[0], 1);
6913                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
6914                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
6915
6916                 connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
6917                 timeout_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().drain(..)
6918                         .filter(|tx| tx.input[0].previous_output.txid == bs_commitment_tx[0].txid()).collect();
6919                 check_spends!(timeout_tx[0], bs_commitment_tx[0]);
6920                 // For both a revoked or non-revoked commitment transaction, after ANTI_REORG_DELAY the
6921                 // dust HTLC should have been failed.
6922                 expect_payment_failed!(nodes[0], dust_hash, false);
6923
6924                 if !revoked {
6925                         assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
6926                 } else {
6927                         assert_eq!(timeout_tx[0].lock_time.0, 12);
6928                 }
6929                 // We fail non-dust-HTLC 2 by broadcast of local timeout/revocation-claim tx
6930                 mine_transaction(&nodes[0], &timeout_tx[0]);
6931                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
6932                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
6933                 expect_payment_failed!(nodes[0], non_dust_hash, false);
6934         }
6935 }
6936
6937 #[test]
6938 fn test_sweep_outbound_htlc_failure_update() {
6939         do_test_sweep_outbound_htlc_failure_update(false, true);
6940         do_test_sweep_outbound_htlc_failure_update(false, false);
6941         do_test_sweep_outbound_htlc_failure_update(true, false);
6942 }
6943
6944 #[test]
6945 fn test_user_configurable_csv_delay() {
6946         // We test our channel constructors yield errors when we pass them absurd csv delay
6947
6948         let mut low_our_to_self_config = UserConfig::default();
6949         low_our_to_self_config.channel_handshake_config.our_to_self_delay = 6;
6950         let mut high_their_to_self_config = UserConfig::default();
6951         high_their_to_self_config.channel_handshake_limits.their_to_self_delay = 100;
6952         let user_cfgs = [Some(high_their_to_self_config.clone()), None];
6953         let chanmon_cfgs = create_chanmon_cfgs(2);
6954         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6955         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
6956         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6957
6958         // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound()
6959         if let Err(error) = Channel::new_outbound(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
6960                 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0,
6961                 &low_our_to_self_config, 0, 42)
6962         {
6963                 match error {
6964                         APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
6965                         _ => panic!("Unexpected event"),
6966                 }
6967         } else { assert!(false) }
6968
6969         // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_from_req()
6970         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
6971         let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
6972         open_channel.to_self_delay = 200;
6973         if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
6974                 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
6975                 &low_our_to_self_config, 0, &nodes[0].logger, 42)
6976         {
6977                 match error {
6978                         ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str()));  },
6979                         _ => panic!("Unexpected event"),
6980                 }
6981         } else { assert!(false); }
6982
6983         // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Chanel::accept_channel()
6984         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
6985         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
6986         let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
6987         accept_channel.to_self_delay = 200;
6988         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
6989         let reason_msg;
6990         if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
6991                 match action {
6992                         &ErrorAction::SendErrorMessage { ref msg } => {
6993                                 assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str()));
6994                                 reason_msg = msg.data.clone();
6995                         },
6996                         _ => { panic!(); }
6997                 }
6998         } else { panic!(); }
6999         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg });
7000
7001         // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req()
7002         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
7003         let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
7004         open_channel.to_self_delay = 200;
7005         if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7006                 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
7007                 &high_their_to_self_config, 0, &nodes[0].logger, 42)
7008         {
7009                 match error {
7010                         ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); },
7011                         _ => panic!("Unexpected event"),
7012                 }
7013         } else { assert!(false); }
7014 }
7015
7016 #[test]
7017 fn test_check_htlc_underpaying() {
7018         // Send payment through A -> B but A is maliciously
7019         // sending a probe payment (i.e less than expected value0
7020         // to B, B should refuse payment.
7021
7022         let chanmon_cfgs = create_chanmon_cfgs(2);
7023         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7024         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7025         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7026
7027         // Create some initial channels
7028         create_announced_chan_between_nodes(&nodes, 0, 1);
7029
7030         let scorer = test_utils::TestScorer::new();
7031         let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7032         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV).with_features(nodes[1].node.invoice_features());
7033         let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(), None, 10_000, TEST_FINAL_CLTV, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
7034         let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]);
7035         let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None).unwrap();
7036         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
7037                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
7038         check_added_monitors!(nodes[0], 1);
7039
7040         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
7041         assert_eq!(events.len(), 1);
7042         let mut payment_event = SendEvent::from_event(events.pop().unwrap());
7043         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
7044         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
7045
7046         // Note that we first have to wait a random delay before processing the receipt of the HTLC,
7047         // and then will wait a second random delay before failing the HTLC back:
7048         expect_pending_htlcs_forwardable!(nodes[1]);
7049         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
7050
7051         // nodes[1] is expecting a payment of 100_000 but received 10_000,
7052         // so it should fail the HTLC as if we didn't know the preimage.
7053         nodes[1].node.process_pending_htlc_forwards();
7054
7055         let events = nodes[1].node.get_and_clear_pending_msg_events();
7056         assert_eq!(events.len(), 1);
7057         let (update_fail_htlc, commitment_signed) = match events[0] {
7058                 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
7059                         assert!(update_add_htlcs.is_empty());
7060                         assert!(update_fulfill_htlcs.is_empty());
7061                         assert_eq!(update_fail_htlcs.len(), 1);
7062                         assert!(update_fail_malformed_htlcs.is_empty());
7063                         assert!(update_fee.is_none());
7064                         (update_fail_htlcs[0].clone(), commitment_signed)
7065                 },
7066                 _ => panic!("Unexpected event"),
7067         };
7068         check_added_monitors!(nodes[1], 1);
7069
7070         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlc);
7071         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
7072
7073         // 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32
7074         let mut expected_failure_data = (10_000 as u64).to_be_bytes().to_vec();
7075         expected_failure_data.extend_from_slice(&CHAN_CONFIRM_DEPTH.to_be_bytes());
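	// 0x4000|15 is PERM|incorrect_or_unknown_payment_details (BOLT 4), whose failure data is the
	// HTLC amount followed by the current block height.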
7076         expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000|15, &expected_failure_data[..]);
7077 }
7078
7079 #[test]
7080 fn test_announce_disable_channels() {
7081         // Create 3 channels between A and B. Disconnect B. Call timer_tick_occurred twice and check for the
7082         // disabling ChannelUpdates. Reconnect B, reestablish, and check that re-enabling ChannelUpdates are generated.
7083
7084         let chanmon_cfgs = create_chanmon_cfgs(2);
7085         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7086         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7087         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7088
7089         create_announced_chan_between_nodes(&nodes, 0, 1);
7090         create_announced_chan_between_nodes(&nodes, 1, 0);
7091         create_announced_chan_between_nodes(&nodes, 0, 1);
7092
7093         // Disconnect peers
7094         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
7095         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
7096
7097         nodes[0].node.timer_tick_occurred(); // Enabled -> DisabledStaged
7098         nodes[0].node.timer_tick_occurred(); // DisabledStaged -> Disabled
7099         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
7100         assert_eq!(msg_events.len(), 3);
7101         let mut chans_disabled = HashMap::new();
7102         for e in msg_events {
7103                 match e {
7104                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
7105                                 assert_eq!(msg.contents.flags & (1<<1), 1<<1); // The "channel disabled" bit should be set
7106                                 // Check that each channel gets updated exactly once
7107                                 if chans_disabled.insert(msg.contents.short_channel_id, msg.contents.timestamp).is_some() {
7108                                         panic!("Generated ChannelUpdate for wrong chan!");
7109                                 }
7110                         },
7111                         _ => panic!("Unexpected event"),
7112                 }
7113         }
7114         // Reconnect peers
7115         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
7116         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
7117         assert_eq!(reestablish_1.len(), 3);
7118         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
7119         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
7120         assert_eq!(reestablish_2.len(), 3);
7121
7122         // Reestablish chan_1
7123         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
7124         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7125         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
7126         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7127         // Reestablish chan_2
7128         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[1]);
7129         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7130         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[1]);
7131         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7132         // Reestablish chan_3
7133         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[2]);
7134         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7135         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[2]);
7136         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7137
7138         nodes[0].node.timer_tick_occurred();
7139         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7140         nodes[0].node.timer_tick_occurred();
7141         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
7142         assert_eq!(msg_events.len(), 3);
7143         for e in msg_events {
7144                 match e {
7145                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
7146                                 assert_eq!(msg.contents.flags & (1<<1), 0); // The "channel disabled" bit should be off
7147                                 match chans_disabled.remove(&msg.contents.short_channel_id) {
7148                                         // Each update should have a higher timestamp than the previous one, replacing
7149                                         // the old one.
7150                                         Some(prev_timestamp) => assert!(msg.contents.timestamp > prev_timestamp),
7151                                         None => panic!("Generated ChannelUpdate for wrong chan!"),
7152                                 }
7153                         },
7154                         _ => panic!("Unexpected event"),
7155                 }
7156         }
7157         // Check that each channel gets updated exactly once
7158         assert!(chans_disabled.is_empty());
7159 }
7160
7161 #[test]
7162 fn test_bump_penalty_txn_on_revoked_commitment() {
7163         // If penalty txn have feerates too low to get into mempools, RBF-bump them to be sure
7164         // we're able to claim outputs on the revoked commitment transaction before its timelocks expire
7165
7166         let chanmon_cfgs = create_chanmon_cfgs(2);
7167         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7168         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7169         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7170
7171         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7172
7173         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
7174         let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 30)
7175                 .with_features(nodes[0].node.invoice_features());
7176         let (route,_, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_params, 3000000, 30);
7177         send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000);
7178
7179         let revoked_txn = get_local_commitment_txn!(nodes[0], chan.2);
7180         // Revoked commitment txn with 4 outputs: to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
7181         assert_eq!(revoked_txn[0].output.len(), 4);
7182         assert_eq!(revoked_txn[0].input.len(), 1);
7183         assert_eq!(revoked_txn[0].input[0].previous_output.txid, chan.3.txid());
7184         let revoked_txid = revoked_txn[0].txid();
7185
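	// Sum the v0 P2WSH outputs (to_local plus both HTLC outputs); these are exactly the outputs
	// the justice tx claims, while the counterparty's to_remote P2WPKH output is skipped.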
7186         let mut penalty_sum = 0;
7187         for outp in revoked_txn[0].output.iter() {
7188                 if outp.script_pubkey.is_v0_p2wsh() {
7189                         penalty_sum += outp.value;
7190                 }
7191         }
7192
7193         // Connect blocks to change the height_timer range to check that we use the right soonest_timelock
7194         let header_114 = connect_blocks(&nodes[1], 14);
7195
7196         // Actually revoke tx by claiming a HTLC
7197         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
7198         let header = BlockHeader { version: 0x20000000, prev_blockhash: header_114, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
7199         connect_block(&nodes[1], &Block { header, txdata: vec![revoked_txn[0].clone()] });
7200         check_added_monitors!(nodes[1], 1);
7201
7202         // One or more justice tx should have been broadcast, check it
7203         let penalty_1;
7204         let feerate_1;
7205         {
7206                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7207                 assert_eq!(node_txn.len(), 1); // justice tx (broadcasted from ChannelMonitor)
7208                 assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7209                 assert_eq!(node_txn[0].output.len(), 1);
7210                 check_spends!(node_txn[0], revoked_txn[0]);
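		// Feerate is computed as fee (sats) * 1000 / tx weight, i.e. sats per kilo-weight-unit.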
7211                 let fee_1 = penalty_sum - node_txn[0].output[0].value;
7212                 feerate_1 = fee_1 * 1000 / node_txn[0].weight() as u64;
7213                 penalty_1 = node_txn[0].txid();
7214                 node_txn.clear();
7215         };
7216
7217         // After exhaustion of height timer, a new bumped justice tx should have been broadcast, check it
7218         connect_blocks(&nodes[1], 15);
7219         let mut penalty_2 = penalty_1;
7220         let mut feerate_2 = 0;
7221         {
7222                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7223                 assert_eq!(node_txn.len(), 1);
7224                 if node_txn[0].input[0].previous_output.txid == revoked_txid {
7225                         assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7226                         assert_eq!(node_txn[0].output.len(), 1);
7227                         check_spends!(node_txn[0], revoked_txn[0]);
7228                         penalty_2 = node_txn[0].txid();
7229                         // Verify the new bumped tx is different from the last claiming transaction; we don't want spurious rebroadcasts
7230                         assert_ne!(penalty_2, penalty_1);
7231                         let fee_2 = penalty_sum - node_txn[0].output[0].value;
7232                         feerate_2 = fee_2 * 1000 / node_txn[0].weight() as u64;
7233                         // Verify 25% bump heuristic
7234                         assert!(feerate_2 * 100 >= feerate_1 * 125);
7235                         node_txn.clear();
7236                 }
7237         }
7238         assert_ne!(feerate_2, 0);
7239
7240         // After exhaustion of height timer for a 2nd time, a new bumped justice tx should have been broadcast, check it
7241         connect_blocks(&nodes[1], 1);
7242         let penalty_3;
7243         let mut feerate_3 = 0;
7244         {
7245                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7246                 assert_eq!(node_txn.len(), 1);
7247                 if node_txn[0].input[0].previous_output.txid == revoked_txid {
7248                         assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7249                         assert_eq!(node_txn[0].output.len(), 1);
7250                         check_spends!(node_txn[0], revoked_txn[0]);
7251                         penalty_3 = node_txn[0].txid();
7252                         // Verify the new bumped tx is different from the last claiming transaction; we don't want spurious rebroadcasts
7253                         assert_ne!(penalty_3, penalty_2);
7254                         let fee_3 = penalty_sum - node_txn[0].output[0].value;
7255                         feerate_3 = fee_3 * 1000 / node_txn[0].weight() as u64;
7256                         // Verify 25% bump heuristic
7257                         assert!(feerate_3 * 100 >= feerate_2 * 125);
7258                         node_txn.clear();
7259                 }
7260         }
7261         assert_ne!(feerate_3, 0);
7262
7263         nodes[1].node.get_and_clear_pending_events();
7264         nodes[1].node.get_and_clear_pending_msg_events();
7265 }
7266
7267 #[test]
7268 fn test_bump_penalty_txn_on_revoked_htlcs() {
7269         // If penalty txn have feerates too low to get into mempools, RBF-bump them to be sure
7270         // we're able to claim outputs on revoked HTLC transactions before their timelocks expire
7271
7272         let mut chanmon_cfgs = create_chanmon_cfgs(2);
7273         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
7274         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7275         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7276         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7277
7278         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7279         // Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps)
7280         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 50).with_features(nodes[1].node.invoice_features());
7281         let scorer = test_utils::TestScorer::new();
7282         let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7283         let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(), None,
7284                 3_000_000, 50, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
7285         let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0;
7286         let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 50).with_features(nodes[0].node.invoice_features());
7287         let route = get_route(&nodes[1].node.get_our_node_id(), &payment_params, &nodes[1].network_graph.read_only(), None,
7288                 3_000_000, 50, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
7289         send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000);
7290
7291         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
7292         assert_eq!(revoked_local_txn[0].input.len(), 1);
7293         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
7294
7295         // Revoke local commitment tx
7296         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
7297
7298         let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
7299         // B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx
7300         connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] });
7301         check_closed_broadcast!(nodes[1], true);
7302         check_added_monitors!(nodes[1], 1);
7303         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
7304         connect_blocks(&nodes[1], 49); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
7305
7306         let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
7307         assert_eq!(revoked_htlc_txn.len(), 2);
7308
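	// The witness script length identifies each claim: the accepted-HTLC script is spent by the
	// HTLC-success (preimage) tx, the offered-HTLC script by the HTLC-timeout tx.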
7309         assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
7310         assert_eq!(revoked_htlc_txn[0].input.len(), 1);
7311         check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
7312
7313         assert_eq!(revoked_htlc_txn[1].input.len(), 1);
7314         assert_eq!(revoked_htlc_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
7315         assert_eq!(revoked_htlc_txn[1].output.len(), 1);
7316         check_spends!(revoked_htlc_txn[1], revoked_local_txn[0]);
7317
7318         // Broadcast set of revoked txn on A
7319         let hash_128 = connect_blocks(&nodes[0], 40);
7320         let header_11 = BlockHeader { version: 0x20000000, prev_blockhash: hash_128, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
7321         connect_block(&nodes[0], &Block { header: header_11, txdata: vec![revoked_local_txn[0].clone()] });
7322         let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
7323         connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()] });
7324         let events = nodes[0].node.get_and_clear_pending_events();
7325         expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
7326         match events.last().unwrap() {
7327                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
7328                 _ => panic!("Unexpected event"),
7329         }
7330         let first;
7331         let feerate_1;
7332         let penalty_txn;
7333         {
7334                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7335                 assert_eq!(node_txn.len(), 4); // 3 penalty txn on revoked commitment tx + 1 penalty txn on revoked HTLC txn
7336                 // Verify the claim txn are spending the revoked HTLC txn
7337
7338                 // node_txn 0-2 each spend a separate revoked output from revoked_local_txn[0]
7339                 // Note that node_txn[0] and node_txn[1] are bogus - they double spend the revoked_htlc_txn
7340                 // which are included in the same block (they are broadcast because we scan the
7341                 // transactions linearly and generate claims as we go; they should likely be removed in the
7342                 // future).
7343                 assert_eq!(node_txn[0].input.len(), 1);
7344                 check_spends!(node_txn[0], revoked_local_txn[0]);
7345                 assert_eq!(node_txn[1].input.len(), 1);
7346                 check_spends!(node_txn[1], revoked_local_txn[0]);
7347                 assert_eq!(node_txn[2].input.len(), 1);
7348                 check_spends!(node_txn[2], revoked_local_txn[0]);
7349
7350                 // Each of the three justice transactions claim a separate (single) output of the three
7351                 // available, which we check here:
7352                 assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
7353                 assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
7354                 assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
7355
7356                 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output);
7357                 assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
7358
7359                 // node_txn[3] spends the revoked outputs from the revoked_htlc_txn (which only have one
7360                 // output, checked above).
7361                 assert_eq!(node_txn[3].input.len(), 2);
7362                 assert_eq!(node_txn[3].output.len(), 1);
7363                 check_spends!(node_txn[3], revoked_htlc_txn[0], revoked_htlc_txn[1]);
7364
7365                 first = node_txn[3].txid();
7366                 // Store both feerates for later comparison
7367                 let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[3].output[0].value;
7368                 feerate_1 = fee_1 * 1000 / node_txn[3].weight() as u64;
7369                 penalty_txn = vec![node_txn[2].clone()];
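		// Only node_txn[2] is kept for confirmation; node_txn[0] and node_txn[1] conflict with the
		// already-confirmed revoked HTLC txn and would never make it into a block.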
7370                 node_txn.clear();
7371         }
7372
7373         // Connect one more block to see if bumped penalties are issued for HTLC txn
7374         let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: header_129.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
7375         connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn });
7376         let header_131 = BlockHeader { version: 0x20000000, prev_blockhash: header_130.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
7377         connect_block(&nodes[0], &Block { header: header_131, txdata: Vec::new() });
7378
7379         // A few more blocks to confirm the penalty txn
7380         connect_blocks(&nodes[0], 4);
7381         assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
7382         let header_144 = connect_blocks(&nodes[0], 9);
7383         let node_txn = {
7384                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7385                 assert_eq!(node_txn.len(), 1);
7386
7387                 assert_eq!(node_txn[0].input.len(), 2);
7388                 check_spends!(node_txn[0], revoked_htlc_txn[0], revoked_htlc_txn[1]);
7389                 // Verify bumped tx is different and 25% bump heuristic
7390                 assert_ne!(first, node_txn[0].txid());
7391                 let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[0].output[0].value;
7392                 let feerate_2 = fee_2 * 1000 / node_txn[0].weight() as u64;
7393                 assert!(feerate_2 * 100 > feerate_1 * 125);
7394                 let txn = vec![node_txn[0].clone()];
7395                 node_txn.clear();
7396                 txn
7397         };
7398         // Broadcast claim txn and confirm blocks to avoid further bumps on these outputs
7399         let header_145 = BlockHeader { version: 0x20000000, prev_blockhash: header_144, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
7400         connect_block(&nodes[0], &Block { header: header_145, txdata: node_txn });
7401         connect_blocks(&nodes[0], 20);
7402         {
7403                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7404                 // We verify that no new transaction has been broadcast. Previously we had a bug here:
7405                 // we failed to track remote HTLC outputs for monitoring (see #411), so we would not see
7406                 // a justice tx spend them, and bumped justice txn were generated forever instead of being
7407                 // safely cleaned up after confirmation plus ANTI_REORG_DELAY blocks. Here we enforce that
7408                 // a claiming transaction spending the revoked HTLC outputs removes the claim request as
7409                 // expected, drying up bumped justice generation.
7410                 assert_eq!(node_txn.len(), 0);
7411                 node_txn.clear();
7412         }
7413         check_closed_broadcast!(nodes[0], true);
7414         check_added_monitors!(nodes[0], 1);
7415 }
7416
7417 #[test]
7418 fn test_bump_penalty_txn_on_remote_commitment() {
7419         // If claim txn have feerates too low to get into mempools, RBF-bump them to be sure
7420         // we're able to claim outputs on the remote commitment transaction before its timelocks expire
7421
7422         // Create 2 HTLCs
7423         // Provide preimage for one
7424         // Check aggregation
7425
7426         let chanmon_cfgs = create_chanmon_cfgs(2);
7427         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7428         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7429         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7430
7431         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7432         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
7433         route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
7434
7435         // Remote commitment txn with 4 outputs: to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
7436         let remote_txn = get_local_commitment_txn!(nodes[0], chan.2);
7437         assert_eq!(remote_txn[0].output.len(), 4);
7438         assert_eq!(remote_txn[0].input.len(), 1);
7439         assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());
7440
7441         // Claim a HTLC without revocation (provide B monitor with preimage)
7442         nodes[1].node.claim_funds(payment_preimage);
7443         expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
7444         mine_transaction(&nodes[1], &remote_txn[0]);
7445         check_added_monitors!(nodes[1], 2);
7446         connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
7447
7448         // One or more claim tx should have been broadcast, check it
7449         let timeout;
7450         let preimage;
7451         let preimage_bump;
7452         let feerate_timeout;
7453         let feerate_preimage;
7454         {
7455                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7456                 // 3 transactions including:
7457                 //   preimage and timeout sweeps from remote commitment + preimage sweep bump
7458                 assert_eq!(node_txn.len(), 3);
7459                 assert_eq!(node_txn[0].input.len(), 1);
7460                 assert_eq!(node_txn[1].input.len(), 1);
7461                 assert_eq!(node_txn[2].input.len(), 1);
7462                 check_spends!(node_txn[0], remote_txn[0]);
7463                 check_spends!(node_txn[1], remote_txn[0]);
7464                 check_spends!(node_txn[2], remote_txn[0]);
7465
7466                 preimage = node_txn[0].txid();
7467                 let index = node_txn[0].input[0].previous_output.vout;
7468                 let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
7469                 feerate_preimage = fee * 1000 / node_txn[0].weight() as u64;
7470
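		// The relative broadcast order of the preimage bump and the timeout claim isn't guaranteed,
		// so identify the bump as the tx sharing node_txn[0]'s spent outpoint.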
7471                 let (preimage_bump_tx, timeout_tx) = if node_txn[2].input[0].previous_output == node_txn[0].input[0].previous_output {
7472                         (node_txn[2].clone(), node_txn[1].clone())
7473                 } else {
7474                         (node_txn[1].clone(), node_txn[2].clone())
7475                 };
7476
7477                 preimage_bump = preimage_bump_tx;
7478                 check_spends!(preimage_bump, remote_txn[0]);
7479                 assert_eq!(node_txn[0].input[0].previous_output, preimage_bump.input[0].previous_output);
7480
7481                 timeout = timeout_tx.txid();
7482                 let index = timeout_tx.input[0].previous_output.vout;
7483                 let fee = remote_txn[0].output[index as usize].value - timeout_tx.output[0].value;
7484                 feerate_timeout = fee * 1000 / timeout_tx.weight() as u64;
7485
7486                 node_txn.clear();
7487         };
7488         assert_ne!(feerate_timeout, 0);
7489         assert_ne!(feerate_preimage, 0);
7490
7491         // After exhaustion of height timer, new bumped claim txn should have been broadcast, check it
7492         connect_blocks(&nodes[1], 15);
7493         {
7494                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7495                 assert_eq!(node_txn.len(), 1);
7496                 assert_eq!(node_txn[0].input.len(), 1);
7497                 assert_eq!(preimage_bump.input.len(), 1);
7498                 check_spends!(node_txn[0], remote_txn[0]);
7499                 check_spends!(preimage_bump, remote_txn[0]);
7500
7501                 let index = preimage_bump.input[0].previous_output.vout;
7502                 let fee = remote_txn[0].output[index as usize].value - preimage_bump.output[0].value;
7503                 let new_feerate = fee * 1000 / preimage_bump.weight() as u64;
7504                 assert!(new_feerate * 100 > feerate_timeout * 125);
7505                 assert_ne!(timeout, preimage_bump.txid());
7506
7507                 let index = node_txn[0].input[0].previous_output.vout;
7508                 let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
7509                 let new_feerate = fee * 1000 / node_txn[0].weight() as u64;
7510                 assert!(new_feerate * 100 > feerate_preimage * 125);
7511                 assert_ne!(preimage, node_txn[0].txid());
7512
7513                 node_txn.clear();
7514         }
7515
7516         nodes[1].node.get_and_clear_pending_events();
7517         nodes[1].node.get_and_clear_pending_msg_events();
7518 }
7519
7520 #[test]
7521 fn test_counterparty_raa_skip_no_crash() {
7522         // Previously, if our counterparty sent two RAAs in a row without us having provided a
7523         // commitment transaction, we would have happily carried on and provided them the next
7524         // commitment transaction based on one RAA forward. This would probably eventually have led to
7525         // channel closure, but it would not have resulted in funds loss. Still, our
7526         // EnforcingSigner would have panicked as it doesn't like jumps into the future. Here, we
7527         // check simply that the channel is closed in response to such an RAA, but don't check whether
7528         // we decide to punish our counterparty for revoking their funds (as we don't currently
7529         // implement that).
7530         let chanmon_cfgs = create_chanmon_cfgs(2);
7531         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7532         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7533         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7534         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
7535
7536         let per_commitment_secret;
7537         let next_per_commitment_point;
7538         {
7539                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
7540                 let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
7541                 let keys = guard.channel_by_id.get_mut(&channel_id).unwrap().get_signer();
7542
7543                 const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
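		// Per BOLT 3, per-commitment secret indices count down from 2^48 - 1, so releasing
		// successively lower indices revokes successive commitment states.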
7544
7545                 // Make signer believe we got a counterparty signature, so that it allows the revocation
7546                 keys.get_enforcement_state().last_holder_commitment -= 1;
7547                 per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
7548
7549                 // Must revoke without gaps
7550                 keys.get_enforcement_state().last_holder_commitment -= 1;
7551                 keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
7552
7553                 keys.get_enforcement_state().last_holder_commitment -= 1;
7554                 next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
7555                         &SecretKey::from_slice(&keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
7556         }
7557
7558         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
7559                 &msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
7560         assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
7561         check_added_monitors!(nodes[1], 1);
7562         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() });
7563 }
7564
7565 #[test]
7566 fn test_bump_txn_sanitize_tracking_maps() {
7567         // Sanitizing pending_claim_requests and claimable_outpoints used to be buggy;
7568         // verify we clean them right after the expiration of ANTI_REORG_DELAY.
7569
7570         let chanmon_cfgs = create_chanmon_cfgs(2);
7571         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7572         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7573         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7574
7575         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7576         // Lock HTLC in both directions
7577         let (payment_preimage_1, _, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
7578         let (_, payment_hash_2, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
7579
7580         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
7581         assert_eq!(revoked_local_txn[0].input.len(), 1);
7582         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
7583
7584         // Revoke local commitment tx
7585         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
7586
7587         // Broadcast set of revoked txn on A
7588         connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH);
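	// As the HTLC approaches its CLTV expiry without being claimed, nodes[0] fails it backwards
	// rather than risk having to resolve it on-chain.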
7589         expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
7590         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
7591
7592         mine_transaction(&nodes[0], &revoked_local_txn[0]);
7593         check_closed_broadcast!(nodes[0], true);
7594         check_added_monitors!(nodes[0], 1);
7595         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
7596         let penalty_txn = {
7597                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7598                 assert_eq!(node_txn.len(), 3); //ChannelMonitor: justice txn * 3
7599                 check_spends!(node_txn[0], revoked_local_txn[0]);
7600                 check_spends!(node_txn[1], revoked_local_txn[0]);
7601                 check_spends!(node_txn[2], revoked_local_txn[0]);
7602                 let penalty_txn = vec![node_txn[0].clone(), node_txn[1].clone(), node_txn[2].clone()];
7603                 node_txn.clear();
7604                 penalty_txn
7605         };
7606         let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
7607         connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn });
7608         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
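	// After ANTI_REORG_DELAY confirmations of the penalty txn, the tracking maps should be empty.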
7609         {
7610                 let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(OutPoint { txid: chan.3.txid(), index: 0 }).unwrap();
7611                 assert!(monitor.inner.lock().unwrap().onchain_tx_handler.pending_claim_requests.is_empty());
7612                 assert!(monitor.inner.lock().unwrap().onchain_tx_handler.claimable_outpoints.is_empty());
7613         }
7614 }
7615
7616 #[test]
7617 fn test_pending_claimed_htlc_no_balance_underflow() {
7618         // Tests that if we have a pending outbound HTLC as well as a claimed-but-not-fully-removed
7619         // HTLC we will not underflow when we call `Channel::get_balance_msat()`.
7620         let chanmon_cfgs = create_chanmon_cfgs(2);
7621         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7622         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7623         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7624         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
7625
7626         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_010_000);
7627         nodes[1].node.claim_funds(payment_preimage);
7628         expect_payment_claimed!(nodes[1], payment_hash, 1_010_000);
7629         check_added_monitors!(nodes[1], 1);
7630         let fulfill_ev = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
7631
7632         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &fulfill_ev.update_fulfill_htlcs[0]);
7633         expect_payment_sent_without_paths!(nodes[0], payment_preimage);
7634         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &fulfill_ev.commitment_signed);
7635         check_added_monitors!(nodes[0], 1);
7636         let (_raa, _cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
7637
7638         // At this point nodes[1] has received 1,010k msat (10k msat more than their reserve) and can
7639         // send an HTLC back (though it will go in the holding cell). Send an HTLC back and check we
7640         // can get our balance.
7641
7642         // Get a route from nodes[1] to nodes[0] by getting a route going the other way and then flip
7643         // the public key of the only hop. This works around ChannelDetails not showing the
7644         // almost-claimed HTLC as available balance.
7645         let (mut route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 10_000);
7646         route.payment_params = None; // This is all wrong, but unnecessary
7647         route.paths[0][0].pubkey = nodes[0].node.get_our_node_id();
7648         let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[0]);
7649         nodes[1].node.send_payment_with_route(&route, payment_hash_2,
7650                 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
7651
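	// 1_010_000 msat received minus the 10_000 msat HTLC now in the holding cell leaves exactly
	// 1_000_000 msat; prior to the fix this computation could underflow.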
7652         assert_eq!(nodes[1].node.list_channels()[0].balance_msat, 1_000_000);
7653 }
7654
7655 #[test]
7656 fn test_channel_conf_timeout() {
7657         // Tests that, for inbound channels, we give up on them if the funding transaction does not
7658         // confirm within 2016 blocks, as recommended by BOLT 2.
7659         let chanmon_cfgs = create_chanmon_cfgs(2);
7660         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7661         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7662         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7663
7664         let _funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 100_000);
7665
7666         // The outbound node should wait forever for confirmation:
7667         // This matches `channel::FUNDING_CONF_DEADLINE_BLOCKS` and BOLT 2's suggested timeout, thus is
7668         // copied here instead of directly referencing the constant.
7669         connect_blocks(&nodes[0], 2016);
7670         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7671
7672         // The inbound node should fail the channel after exactly 2016 blocks
7673         connect_blocks(&nodes[1], 2015);
7674         check_added_monitors!(nodes[1], 0);
7675         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7676
7677         connect_blocks(&nodes[1], 1);
7678         check_added_monitors!(nodes[1], 1);
7679         check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut);
7680         let close_ev = nodes[1].node.get_and_clear_pending_msg_events();
7681         assert_eq!(close_ev.len(), 1);
7682         match close_ev[0] {
7683                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, ref node_id } => {
7684                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
7685                         assert_eq!(msg.data, "Channel closed because funding transaction failed to confirm within 2016 blocks");
7686                 },
7687                 _ => panic!("Unexpected event"),
7688         }
7689 }
7690
7691 #[test]
7692 fn test_override_channel_config() {
7693         let chanmon_cfgs = create_chanmon_cfgs(2);
7694         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7695         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7696         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7697
7698         // Node0 initiates a channel to node1 using the override config.
7699         let mut override_config = UserConfig::default();
7700         override_config.channel_handshake_config.our_to_self_delay = 200;
7701
7702         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, Some(override_config)).unwrap();
7703
7704         // Assert the channel created by node0 is using the override config.
7705         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7706         assert_eq!(res.channel_flags, 0);
7707         assert_eq!(res.to_self_delay, 200);
7708 }
7709
7710 #[test]
7711 fn test_override_0msat_htlc_minimum() {
7712         let mut zero_config = UserConfig::default();
7713         zero_config.channel_handshake_config.our_htlc_minimum_msat = 0;
7714         let chanmon_cfgs = create_chanmon_cfgs(2);
7715         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7716         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(zero_config.clone())]);
7717         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7718
7719         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, Some(zero_config)).unwrap();
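	// Although the config requests a 0 msat minimum, LDK floors the advertised htlc_minimum_msat
	// at 1 msat, as checked on both sides of the handshake below.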
7720         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7721         assert_eq!(res.htlc_minimum_msat, 1);
7722
7723         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
7724         let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
7725         assert_eq!(res.htlc_minimum_msat, 1);
7726 }
7727
7728 #[test]
7729 fn test_channel_update_has_correct_htlc_maximum_msat() {
7730         // Tests that the `ChannelUpdate` message has the correct value set for `htlc_maximum_msat`.
7731         // BOLT 7 specifies that, if present, `htlc_maximum_msat`:
7732         // 1. MUST be set to less than or equal to the channel capacity. In LDK, this is capped to
7733         // 90% of the `channel_value`.
7734         // 2. MUST be set to less than or equal to the `max_htlc_value_in_flight_msat` received from the peer.
7735
7736         let mut config_30_percent = UserConfig::default();
7737         config_30_percent.channel_handshake_config.announced_channel = true;
7738         config_30_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 30;
7739         let mut config_50_percent = UserConfig::default();
7740         config_50_percent.channel_handshake_config.announced_channel = true;
7741         config_50_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50;
7742         let mut config_95_percent = UserConfig::default();
7743         config_95_percent.channel_handshake_config.announced_channel = true;
7744         config_95_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 95;
7745         let mut config_100_percent = UserConfig::default();
7746         config_100_percent.channel_handshake_config.announced_channel = true;
7747         config_100_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
7748
7749         let chanmon_cfgs = create_chanmon_cfgs(4);
7750         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
7751         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[Some(config_30_percent), Some(config_50_percent), Some(config_95_percent), Some(config_100_percent)]);
7752         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
7753
7754         let channel_value_satoshis = 100000;
7755         let channel_value_msat = channel_value_satoshis * 1000;
7756         let channel_value_30_percent_msat = (channel_value_msat as f64 * 0.3) as u64;
7757         let channel_value_50_percent_msat = (channel_value_msat as f64 * 0.5) as u64;
7758         let channel_value_90_percent_msat = (channel_value_msat as f64 * 0.9) as u64;
7759
7760         let (node_0_chan_update, node_1_chan_update, _, _)  = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value_satoshis, 10001);
7761         let (node_2_chan_update, node_3_chan_update, _, _)  = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, channel_value_satoshis, 10001);
7762
7763         // Assert that `node[0]`'s `ChannelUpdate` is capped at 50 percent of the `channel_value`, as
7764         // that's the value of `node[1]`'s `holder_max_htlc_value_in_flight_msat`.
7765         assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, channel_value_50_percent_msat);
7766         // Assert that `node[1]`'s `ChannelUpdate` is capped at 30 percent of the `channel_value`, as
7767         // that's the value of `node[0]`'s `holder_max_htlc_value_in_flight_msat`.
7768         assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, channel_value_30_percent_msat);
7769
7770         // Assert that `node[2]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
7771         // the value of `node[3]`'s `holder_max_htlc_value_in_flight_msat` (100%), exceeds 90% of the
7772         // `channel_value`.
7773         assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
7774         // Assert that `node[3]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
7775         // the value of `node[2]`'s `holder_max_htlc_value_in_flight_msat` (95%), exceeds 90% of the
7776         // `channel_value`.
7777         assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
7778 }
7779
7780 #[test]
7781 fn test_manually_accept_inbound_channel_request() {
7782         let mut manually_accept_conf = UserConfig::default();
7783         manually_accept_conf.manually_accept_inbound_channels = true;
7784         let chanmon_cfgs = create_chanmon_cfgs(2);
7785         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7786         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
7787         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7788
7789         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
7790         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7791
7792         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
7793
7794         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
7795         // accepting the inbound channel request.
7796         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
7797
7798         let events = nodes[1].node.get_and_clear_pending_events();
7799         match events[0] {
7800                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
7801                         nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23).unwrap();
7802                 }
7803                 _ => panic!("Unexpected event"),
7804         }
7805
7806         let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
7807         assert_eq!(accept_msg_ev.len(), 1);
7808
7809         match accept_msg_ev[0] {
7810                 MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
7811                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
7812                 }
7813                 _ => panic!("Unexpected event"),
7814         }
7815
7816         nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
7817
7818         let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
7819         assert_eq!(close_msg_ev.len(), 1);
7820
7821         let events = nodes[1].node.get_and_clear_pending_events();
7822         match events[0] {
7823                 Event::ChannelClosed { user_channel_id, .. } => {
7824                         assert_eq!(user_channel_id, 23);
7825                 }
7826                 _ => panic!("Unexpected event"),
7827         }
7828 }
7829
7830 #[test]
7831 fn test_manually_reject_inbound_channel_request() {
7832         let mut manually_accept_conf = UserConfig::default();
7833         manually_accept_conf.manually_accept_inbound_channels = true;
7834         let chanmon_cfgs = create_chanmon_cfgs(2);
7835         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7836         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
7837         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7838
7839         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
7840         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7841
7842         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
7843
7844         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
7845         // rejecting the inbound channel request.
7846         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
7847
7848         let events = nodes[1].node.get_and_clear_pending_events();
7849         match events[0] {
7850                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
7851                         nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
7852                 }
7853                 _ => panic!("Unexpected event"),
7854         }
7855
7856         let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
7857         assert_eq!(close_msg_ev.len(), 1);
7858
7859         match close_msg_ev[0] {
7860                 MessageSendEvent::HandleError { ref node_id, .. } => {
7861                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
7862                 }
7863                 _ => panic!("Unexpected event"),
7864         }
7865         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
7866 }
7867
7868 #[test]
7869 fn test_reject_funding_before_inbound_channel_accepted() {
7870         // This tests that when `UserConfig::manually_accept_inbound_channels` is set to true, inbound
7871         // channels must be manually accepted through `ChannelManager::accept_inbound_channel` by
7872         // the node operator before the counterparty sends a `FundingCreated` message. If a
7873         // `FundingCreated` message is received before the channel is accepted, it should be rejected
7874         // and the channel should be closed.
7875         let mut manually_accept_conf = UserConfig::default();
7876         manually_accept_conf.manually_accept_inbound_channels = true;
7877         let chanmon_cfgs = create_chanmon_cfgs(2);
7878         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7879         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
7880         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7881
7882         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
7883         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7884         let temp_channel_id = res.temporary_channel_id;
7885
7886         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
7887
7888         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in the `msg_events`.
7889         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
7890
7891         // Clear the `Event::OpenChannelRequest` event without responding to the request.
7892         nodes[1].node.get_and_clear_pending_events();
7893
7894         // Get the `AcceptChannel` message of `nodes[1]` without calling
7895         // `ChannelManager::accept_inbound_channel`, which generates a
7896         // `MessageSendEvent::SendAcceptChannel` event. The message is passed to `nodes[0]`
7897         // `handle_accept_channel`, which is required in order for `create_funding_transaction` to
7898         // succeed when `nodes[0]` is passed to it.
7899         let accept_chan_msg = {
7900                 let mut node_1_per_peer_lock;
7901                 let mut node_1_peer_state_lock;
7902                 let channel =  get_channel_ref!(&nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, temp_channel_id);
7903                 channel.get_accept_channel_message()
7904         };
7905         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
7906
7907         let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
7908
7909         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
7910         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
7911
7912         // The `funding_created_msg` should be rejected by `nodes[1]` as it hasn't accepted the channel
7913         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
7914
7915         let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
7916         assert_eq!(close_msg_ev.len(), 1);
7917
7918         let expected_err = "FundingCreated message received before the channel was accepted";
7919         match close_msg_ev[0] {
7920                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, ref node_id, } => {
7921                         assert_eq!(msg.channel_id, temp_channel_id);
7922                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
7923                         assert_eq!(msg.data, expected_err);
7924                 }
7925                 _ => panic!("Unexpected event"),
7926         }
7927
7928         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() });
7929 }
7930
7931 #[test]
7932 fn test_can_not_accept_inbound_channel_twice() {
7933         let mut manually_accept_conf = UserConfig::default();
7934         manually_accept_conf.manually_accept_inbound_channels = true;
7935         let chanmon_cfgs = create_chanmon_cfgs(2);
7936         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7937         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
7938         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7939
7940         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
7941         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7942
7943         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
7944
7945         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
7946         // accepting the inbound channel request.
7947         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
7948
7949         let events = nodes[1].node.get_and_clear_pending_events();
7950         match events[0] {
7951                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
7952                         nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
7953                         let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0);
7954                         match api_res {
7955                                 Err(APIError::APIMisuseError { err }) => {
7956                                         assert_eq!(err, "The channel isn't currently awaiting to be accepted.");
7957                                 },
7958                                 Ok(_) => panic!("Channel shouldn't be possible to be accepted twice"),
7959                                 Err(_) => panic!("Unexpected Error"),
7960                         }
7961                 }
7962                 _ => panic!("Unexpected event"),
7963         }
7964
7965         // Ensure that the channel wasn't closed after attempting to accept it twice.
7966         let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
7967         assert_eq!(accept_msg_ev.len(), 1);
7968
7969         match accept_msg_ev[0] {
7970                 MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
7971                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
7972                 }
7973                 _ => panic!("Unexpected event"),
7974         }
7975 }
7976
7977 #[test]
7978 fn test_can_not_accept_unknown_inbound_channel() {
7979         let chanmon_cfg = create_chanmon_cfgs(2);
7980         let node_cfg = create_node_cfgs(2, &chanmon_cfg);
7981         let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
7982         let nodes = create_network(2, &node_cfg, &node_chanmgr);
7983
7984         let unknown_channel_id = [0; 32];
7985         let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0);
7986         match api_res {
7987                 Err(APIError::ChannelUnavailable { err }) => {
7988                         assert_eq!(err, format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(unknown_channel_id), nodes[1].node.get_our_node_id()));
7989                 },
7990                 Ok(_) => panic!("It shouldn't be possible to accept an unknown channel"),
7991                 Err(_) => panic!("Unexpected Error"),
7992         }
7993 }
7994
7995 #[test]
7996 fn test_onion_value_mpp_set_calculation() {
7997         // Test that we use the onion value `amt_to_forward` when
7998         // calculating whether we've reached the `total_msat` of an MPP,
7999         // by having a routing node forward more than `amt_to_forward`
8000         // and checking that the receiving node doesn't generate
8001         // a `PaymentClaimable` event too early (see the sketch after this test).
8002         let node_count = 4;
8003         let chanmon_cfgs = create_chanmon_cfgs(node_count);
8004         let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
8005         let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
8006         let mut nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
8007
8008         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8009         let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
8010         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
8011         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
8012
8013         let total_msat = 100_000;
8014         let expected_paths: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]];
8015         let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], total_msat);
8016         let sample_path = route.paths.pop().unwrap();
8017
8018         let mut path_1 = sample_path.clone();
8019         path_1[0].pubkey = nodes[1].node.get_our_node_id();
8020         path_1[0].short_channel_id = chan_1_id;
8021         path_1[1].pubkey = nodes[3].node.get_our_node_id();
8022         path_1[1].short_channel_id = chan_3_id;
8023         path_1[1].fee_msat = 100_000;
8024         route.paths.push(path_1);
8025
8026         let mut path_2 = sample_path.clone();
8027         path_2[0].pubkey = nodes[2].node.get_our_node_id();
8028         path_2[0].short_channel_id = chan_2_id;
8029         path_2[1].pubkey = nodes[3].node.get_our_node_id();
8030         path_2[1].short_channel_id = chan_4_id;
8031         path_2[1].fee_msat = 1_000;
8032         route.paths.push(path_2);
8033
8034         // Send payment
8035         let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
8036         let onion_session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(our_payment_secret), payment_id, &route).unwrap();
8037         nodes[0].node.test_send_payment_internal(&route, our_payment_hash, &Some(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
8038         check_added_monitors!(nodes[0], expected_paths.len());
8039
8040         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8041         assert_eq!(events.len(), expected_paths.len());
8042
8043         // First path
8044         let ev = remove_first_msg_event_to_node(&expected_paths[0][0].node.get_our_node_id(), &mut events);
8045         let mut payment_event = SendEvent::from_event(ev);
8046         let mut prev_node = &nodes[0];
8047
8048         for (idx, &node) in expected_paths[0].iter().enumerate() {
8049                 assert_eq!(node.node.get_our_node_id(), payment_event.node_id);
8050
8051                 if idx == 0 { // routing node
8052                         let session_priv = [3; 32];
8053                         let height = nodes[0].best_block_info().1;
8054                         let session_priv = SecretKey::from_slice(&session_priv).unwrap();
8055                         let mut onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
8056                         let (mut onion_payloads, _, _) = onion_utils::build_onion_payloads(&route.paths[0], 100_000, &Some(our_payment_secret), height + 1, &None).unwrap();
8057                         // Edit amt_to_forward to simulate the sender having set
8058                         // the final amount and the routing node taking less fee
8059                         onion_payloads[1].amt_to_forward = 99_000;
8060                         let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash);
8061                         payment_event.msgs[0].onion_routing_packet = new_onion_packet;
8062                 }
8063
8064                 node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]);
8065                 check_added_monitors!(node, 0);
8066                 commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
8067                 expect_pending_htlcs_forwardable!(node);
8068
8069                 if idx == 0 {
8070                         let mut events_2 = node.node.get_and_clear_pending_msg_events();
8071                         assert_eq!(events_2.len(), 1);
8072                         check_added_monitors!(node, 1);
8073                         payment_event = SendEvent::from_event(events_2.remove(0));
8074                         assert_eq!(payment_event.msgs.len(), 1);
8075                 } else {
8076                         let events_2 = node.node.get_and_clear_pending_events();
8077                         assert!(events_2.is_empty());
8078                 }
8079
8080                 prev_node = node;
8081         }
8082
8083         // Second path
8084         let ev = remove_first_msg_event_to_node(&expected_paths[1][0].node.get_our_node_id(), &mut events);
8085         pass_along_path(&nodes[0], expected_paths[1], 101_000, our_payment_hash.clone(), Some(our_payment_secret), ev, true, None);
8086
8087         claim_payment_along_route(&nodes[0], expected_paths, false, our_payment_preimage);
8088 }
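
// A minimal sketch (hypothetical helper, not LDK API) of the receive-side rule exercised
// above: MPP completion is judged on the onion `amt_to_forward` values, so the 100_000
// msat actually delivered on the first path only counts as the 99_000 msat its onion
// claims, and the payment stays unclaimable until the second path arrives.
#[test]
fn mpp_onion_amount_accounting_sketch() {
        fn mpp_complete(onion_amts_msat: &[u64], total_msat: u64) -> bool {
                onion_amts_msat.iter().sum::<u64>() >= total_msat
        }
        // First path alone: the onion claims only 99_000 msat towards the 100_000 msat total.
        assert!(!mpp_complete(&[99_000], 100_000));
        // The second path's 1_000 msat completes the set.
        assert!(mpp_complete(&[99_000, 1_000], 100_000));
}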
8089
8090 fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) {
8091
8092         let routing_node_count = msat_amounts.len();
8093         let node_count = routing_node_count + 2;
8094
8095         let chanmon_cfgs = create_chanmon_cfgs(node_count);
8096         let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
8097         let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
8098         let nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
8099
8100         let src_idx = 0;
8101         let dst_idx = 1;
8102
8103         // Create channels for each amount
8104         let mut expected_paths = Vec::with_capacity(routing_node_count);
8105         let mut src_chan_ids = Vec::with_capacity(routing_node_count);
8106         let mut dst_chan_ids = Vec::with_capacity(routing_node_count);
8107         for i in 0..routing_node_count {
8108                 let routing_node = 2 + i;
8109                 let src_chan_id = create_announced_chan_between_nodes(&nodes, src_idx, routing_node).0.contents.short_channel_id;
8110                 src_chan_ids.push(src_chan_id);
8111                 let dst_chan_id = create_announced_chan_between_nodes(&nodes, routing_node, dst_idx).0.contents.short_channel_id;
8112                 dst_chan_ids.push(dst_chan_id);
8113                 let path = vec![&nodes[routing_node], &nodes[dst_idx]];
8114                 expected_paths.push(path);
8115         }
8116         let expected_paths: Vec<&[&Node]> = expected_paths.iter().map(|route| route.as_slice()).collect();
8117
8118         // Create a route for each amount
8119         let example_amount = 100000;
8120         let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[src_idx], nodes[dst_idx], example_amount);
8121         let sample_path = route.paths.pop().unwrap();
8122         for i in 0..routing_node_count {
8123                 let routing_node = 2 + i;
8124                 let mut path = sample_path.clone();
8125                 path[0].pubkey = nodes[routing_node].node.get_our_node_id();
8126                 path[0].short_channel_id = src_chan_ids[i];
8127                 path[1].pubkey = nodes[dst_idx].node.get_our_node_id();
8128                 path[1].short_channel_id = dst_chan_ids[i];
8129                 path[1].fee_msat = msat_amounts[i];
8130                 route.paths.push(path);
8131         }
8132
8133         // Send payment with manually set total_msat
8134         let payment_id = PaymentId(nodes[src_idx].keys_manager.backing.get_secure_random_bytes());
8135         let onion_session_privs = nodes[src_idx].node.test_add_new_pending_payment(our_payment_hash, Some(our_payment_secret), payment_id, &route).unwrap();
8136         nodes[src_idx].node.test_send_payment_internal(&route, our_payment_hash, &Some(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
8137         check_added_monitors!(nodes[src_idx], expected_paths.len());
8138
8139         let mut events = nodes[src_idx].node.get_and_clear_pending_msg_events();
8140         assert_eq!(events.len(), expected_paths.len());
8141         let mut amount_received = 0;
8142         for (path_idx, expected_path) in expected_paths.iter().enumerate() {
8143                 let ev = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &mut events);
8144
8145                 let current_path_amount = msat_amounts[path_idx];
8146                 amount_received += current_path_amount;
8147                 let became_claimable_now = amount_received >= total_msat && amount_received - current_path_amount < total_msat;
8148                 pass_along_path(&nodes[src_idx], expected_path, amount_received, our_payment_hash.clone(), Some(our_payment_secret), ev, became_claimable_now, None);
8149         }
8150
8151         claim_payment_along_route(&nodes[src_idx], &expected_paths, false, our_payment_preimage);
8152 }
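
// Sketch of the `became_claimable_now` condition above: the PaymentClaimable event should
// fire exactly on the path whose arrival pushes the running total across `total_msat`.
// For do_test_overshoot_mpp(&[100_000, 10_000, 100_000], 200_000) that is the third path.
#[test]
fn overshoot_mpp_claimable_crossing_sketch() {
        fn crosses_total(amount_received: u64, current_path_amount: u64, total_msat: u64) -> bool {
                amount_received >= total_msat && amount_received - current_path_amount < total_msat
        }
        assert!(!crosses_total(100_000, 100_000, 200_000)); // after the first path
        assert!(!crosses_total(110_000, 10_000, 200_000));  // after the second path
        assert!(crosses_total(210_000, 100_000, 200_000));  // the third path crosses the total
}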
8153
8154 #[test]
8155 fn test_overshoot_mpp() {
8156         do_test_overshoot_mpp(&[100_000, 101_000], 200_000);
8157         do_test_overshoot_mpp(&[100_000, 10_000, 100_000], 200_000);
8158 }
8159
8160 #[test]
8161 fn test_simple_mpp() {
8162         // Simple test of sending a multi-path payment.
8163         let chanmon_cfgs = create_chanmon_cfgs(4);
8164         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
8165         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
8166         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
8167
8168         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8169         let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
8170         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
8171         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
8172
8173         let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
8174         let path = route.paths[0].clone();
8175         route.paths.push(path);
8176         route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
8177         route.paths[0][0].short_channel_id = chan_1_id;
8178         route.paths[0][1].short_channel_id = chan_3_id;
8179         route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
8180         route.paths[1][0].short_channel_id = chan_2_id;
8181         route.paths[1][1].short_channel_id = chan_4_id;
8182         send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
8183         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
8184 }
8185
8186 #[test]
8187 fn test_preimage_storage() {
8188         // Simple test of payment preimage storage allowing no client-side storage to claim payments
8189         let chanmon_cfgs = create_chanmon_cfgs(2);
8190         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8191         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8192         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8193
8194         create_announced_chan_between_nodes(&nodes, 0, 1);
8195
8196         {
8197                 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200, None).unwrap();
8198                 let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
8199                 nodes[0].node.send_payment_with_route(&route, payment_hash,
8200                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
8201                 check_added_monitors!(nodes[0], 1);
8202                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8203                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
8204                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8205                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8206         }
8207         // Note that after leaving the above scope we have no knowledge of any arguments or return
8208         // values from previous calls.
8209         expect_pending_htlcs_forwardable!(nodes[1]);
8210         let events = nodes[1].node.get_and_clear_pending_events();
8211         assert_eq!(events.len(), 1);
8212         match events[0] {
8213                 Event::PaymentClaimable { ref purpose, .. } => {
8214                         match &purpose {
8215                                 PaymentPurpose::InvoicePayment { payment_preimage, .. } => {
8216                                         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap());
8217                                 },
8218                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
8219                         }
8220                 },
8221                 _ => panic!("Unexpected event"),
8222         }
8223 }
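
// The invoice-based sends above attach a payment secret via
// `RecipientOnionFields::secret_only`. Spontaneous (keysend) payments have no
// invoice-provided secret; a sketch of the constructor choice, assuming the
// `spontaneous_empty` constructor added alongside this change:
#[allow(dead_code)]
fn recipient_onion_sketch(payment_secret: Option<PaymentSecret>) -> RecipientOnionFields {
        match payment_secret {
                Some(secret) => RecipientOnionFields::secret_only(secret),
                None => RecipientOnionFields::spontaneous_empty(),
        }
}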
8224
8225 #[test]
8226 #[allow(deprecated)]
8227 fn test_secret_timeout() {
8228         // Simple test of payment secret storage timeouts. After
8229         // `create_inbound_payment(_for_hash)_legacy` is removed, this test will be removed as well.
8230         let chanmon_cfgs = create_chanmon_cfgs(2);
8231         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8232         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8233         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8234
8235         create_announced_chan_between_nodes(&nodes, 0, 1);
8236
8237         let (payment_hash, payment_secret_1) = nodes[1].node.create_inbound_payment_legacy(Some(100_000), 2).unwrap();
8238
8239         // We should fail to register the same payment hash twice, at least until we've connected a
8240         // block with time 7200 + CHAN_CONFIRM_DEPTH + 1.
8241         if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2) {
8242                 assert_eq!(err, "Duplicate payment hash");
8243         } else { panic!(); }
8244         let mut block = {
8245                 let node_1_blocks = nodes[1].blocks.lock().unwrap();
8246                 Block {
8247                         header: BlockHeader {
8248                                 version: 0x2000000,
8249                                 prev_blockhash: node_1_blocks.last().unwrap().0.block_hash(),
8250                                 merkle_root: TxMerkleNode::all_zeros(),
8251                                 time: node_1_blocks.len() as u32 + 7200, bits: 42, nonce: 42 },
8252                         txdata: vec![],
8253                 }
8254         };
8255         connect_block(&nodes[1], &block);
8256         if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2) {
8257                 assert_eq!(err, "Duplicate payment hash");
8258         } else { panic!(); }
8259
8260         // If we then connect the second block, we should be able to register the same payment hash
8261         // again (this time getting a new payment secret).
8262         block.header.prev_blockhash = block.header.block_hash();
8263         block.header.time += 1;
8264         connect_block(&nodes[1], &block);
8265         let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2).unwrap();
8266         assert_ne!(payment_secret_1, our_payment_secret);
8267
8268         {
8269                 let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
8270                 nodes[0].node.send_payment_with_route(&route, payment_hash,
8271                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(payment_hash.0)).unwrap();
8272                 check_added_monitors!(nodes[0], 1);
8273                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8274                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
8275                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8276                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8277         }
8278         // Note that after leaving the above scope we have no knowledge of any arguments or return
8279         // values from previous calls.
8280         expect_pending_htlcs_forwardable!(nodes[1]);
8281         let events = nodes[1].node.get_and_clear_pending_events();
8282         assert_eq!(events.len(), 1);
8283         match events[0] {
8284                 Event::PaymentClaimable { purpose: PaymentPurpose::InvoicePayment { payment_preimage, payment_secret }, .. } => {
8285                         assert!(payment_preimage.is_none());
8286                         assert_eq!(payment_secret, our_payment_secret);
8287                         // We don't actually have the payment preimage with which to claim this payment!
8288                 },
8289                 _ => panic!("Unexpected event"),
8290         }
8291 }
8292
8293 #[test]
8294 fn test_bad_secret_hash() {
8295         // Simple test of unregistered payment hash/invalid payment secret handling
8296         let chanmon_cfgs = create_chanmon_cfgs(2);
8297         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8298         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8299         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8300
8301         create_announced_chan_between_nodes(&nodes, 0, 1);
8302
8303         let random_payment_hash = PaymentHash([42; 32]);
8304         let random_payment_secret = PaymentSecret([43; 32]);
8305         let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2, None).unwrap();
8306         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
8307
8308         // All the below cases should end up being handled exactly identically, so we macro the
8309         // resulting events.
8310         macro_rules! handle_unknown_invalid_payment_data {
8311                 ($payment_hash: expr) => {
8312                         check_added_monitors!(nodes[0], 1);
8313                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8314                         let payment_event = SendEvent::from_event(events.pop().unwrap());
8315                         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8316                         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8317
8318                         // We have to forward pending HTLCs once to process the receipt of the HTLC and then
8319                         // again to process the pending backwards-failure of the HTLC
8320                         expect_pending_htlcs_forwardable!(nodes[1]);
8321                         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment{ payment_hash: $payment_hash }]);
8322                         check_added_monitors!(nodes[1], 1);
8323
8324                         // We should fail the payment back
8325                         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
8326                         match events.pop().unwrap() {
8327                                 MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } } => {
8328                                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
8329                                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
8330                                 },
8331                                 _ => panic!("Unexpected event"),
8332                         }
8333                 }
8334         }
8335
8336         let expected_error_code = 0x4000|15; // incorrect_or_unknown_payment_details
8337         // Error data is the HTLC value (100,000) and current block height
8338         let expected_error_data = [0, 0, 0, 0, 0, 1, 0x86, 0xa0, 0, 0, 0, CHAN_CONFIRM_DEPTH as u8];
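        // Cross-check of the layout above (a sketch assuming the BOLT 4
        // incorrect_or_unknown_payment_details format: u64 htlc_msat followed by u32 height):
        {
                let mut reconstructed = Vec::new();
                reconstructed.extend_from_slice(&100_000u64.to_be_bytes()); // 0x00000000000186a0
                reconstructed.extend_from_slice(&(CHAN_CONFIRM_DEPTH as u32).to_be_bytes());
                assert_eq!(&reconstructed[..], &expected_error_data[..]);
        }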
8339
8340         // Send a payment with the right payment hash but the wrong payment secret
8341         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
8342                 RecipientOnionFields::secret_only(random_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
8343         handle_unknown_invalid_payment_data!(our_payment_hash);
8344         expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data);
8345
8346         // Send a payment with a random payment hash, but the right payment secret
8347         nodes[0].node.send_payment_with_route(&route, random_payment_hash,
8348                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(random_payment_hash.0)).unwrap();
8349         handle_unknown_invalid_payment_data!(random_payment_hash);
8350         expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8351
8352         // Send a payment with a random payment hash and random payment secret
8353         nodes[0].node.send_payment_with_route(&route, random_payment_hash,
8354                 RecipientOnionFields::secret_only(random_payment_secret), PaymentId(random_payment_hash.0)).unwrap();
8355         handle_unknown_invalid_payment_data!(random_payment_hash);
8356         expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8357 }
8358
8359 #[test]
8360 fn test_update_err_monitor_lockdown() {
8361         // Our monitor will lock updates of the local commitment transaction once a broadcast
8362         // condition has been fulfilled (either a force-close from Channel or a block height
8363         // requiring an HTLC-timeout). Trying to update the monitor after lockdown should return a
8364         // ChannelMonitorUpdateStatus error.
8365         //
8366         // This scenario may happen in a watchtower setup, where the watchtower processes a block
8367         // height triggering an HTLC-timeout while a slow-block-processing ChannelManager receives a
8368         // locally-signed commitment at the same time.
8369
8370         let chanmon_cfgs = create_chanmon_cfgs(2);
8371         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8372         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8373         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8374
8375         // Create some initial channel
8376         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
8377         let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
8378
8379         // Rebalance the network to generate htlc in the two directions
8380         send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
8381
8382         // Route a HTLC from node 0 to node 1 (but don't settle)
8383         let (preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
8384
8385         // Copy ChainMonitor to simulate a watchtower and advance node 0's block height until its ChannelMonitor times out the HTLC on-chain
8386         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8387         let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
8388         let persister = test_utils::TestPersister::new();
8389         let watchtower = {
8390                 let new_monitor = {
8391                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8392                         let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
8393                                         &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8394                         assert!(new_monitor == *monitor);
8395                         new_monitor
8396                 };
8397                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8398                 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
8399                 watchtower
8400         };
8401         let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
8402         let block = Block { header, txdata: vec![] };
8403         // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
8404         // transaction lock time requirements here.
8405         chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (block.clone(), 0));
8406         watchtower.chain_monitor.block_connected(&block, 200);
8407
8408         // Try to update ChannelMonitor
8409         nodes[1].node.claim_funds(preimage);
8410         check_added_monitors!(nodes[1], 1);
8411         expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
8412
8413         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8414         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
8415         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
8416         {
8417                 let mut node_0_per_peer_lock;
8418                 let mut node_0_peer_state_lock;
8419                 let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
8420                 if let Ok(update) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
8421                         assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
8422                         assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8423                 } else { assert!(false); }
8424         }
8425         // Our local monitor is in-sync and hasn't yet processed the timeout
8426         check_added_monitors!(nodes[0], 1);
8427         let events = nodes[0].node.get_and_clear_pending_events();
8428         assert_eq!(events.len(), 1);
8429 }
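
// A minimal sketch (hypothetical helper, not LDK API) of how a persister or watchtower
// caller might interpret the `ChannelMonitorUpdateStatus` values asserted above; the
// variant meanings follow their documentation in `chain`.
#[allow(dead_code)]
fn monitor_update_status_sketch(status: ChannelMonitorUpdateStatus) -> &'static str {
        match status {
                // The update was persisted everywhere; channel operation may proceed.
                ChannelMonitorUpdateStatus::Completed => "proceed",
                // Persistence is async; hold the channel until the update completes.
                ChannelMonitorUpdateStatus::InProgress => "pause",
                // The update was rejected (e.g. the monitor already hit a broadcast
                // condition, as the watchtower above did); the channel must be closed.
                ChannelMonitorUpdateStatus::PermanentFailure => "force-close",
        }
}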
8430
8431 #[test]
8432 fn test_concurrent_monitor_claim() {
8433         // Watchtower Alice receives a block and broadcasts state N. The channel then advances to
8434         // state N+1, which is sent to both watchtowers: Bob accepts it, while Alice rejects it as
8435         // she has already broadcast state N. Bob then receives a block and broadcasts the latest
8436         // state, N+1; once N+1 confirms, Alice claims the HTLC output from it.
8437
8438         let chanmon_cfgs = create_chanmon_cfgs(2);
8439         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8440         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8441         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8442
8443         // Create some initial channel
8444         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
8445         let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
8446
8447         // Rebalance the network to generate htlc in the two directions
8448         send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
8449
8450         // Route a HTLC from node 0 to node 1 (but don't settle)
8451         route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
8452
8453         // Copy ChainMonitor to simulate watchtower Alice and advance the block height until her ChannelMonitor times out the HTLC on-chain
8454         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8455         let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
8456         let persister = test_utils::TestPersister::new();
8457         let watchtower_alice = {
8458                 let new_monitor = {
8459                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8460                         let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
8461                                         &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8462                         assert!(new_monitor == *monitor);
8463                         new_monitor
8464                 };
8465                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8466                 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
8467                 watchtower
8468         };
8469         let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
8470         let block = Block { header, txdata: vec![] };
8471         // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
8472         // transaction lock time requirements here.
8473         chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize((CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS) as usize, (block.clone(), 0));
8474         watchtower_alice.chain_monitor.block_connected(&block, CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
8475
8476         // Watchtower Alice should have broadcast a commitment/HTLC-timeout
8477         {
8478                 let mut txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8479                 assert_eq!(txn.len(), 2);
8480                 txn.clear();
8481         }
8482
8483         // Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first.
8484         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8485         let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
8486         let persister = test_utils::TestPersister::new();
8487         let watchtower_bob = {
8488                 let new_monitor = {
8489                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8490                         let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
8491                                         &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8492                         assert!(new_monitor == *monitor);
8493                         new_monitor
8494                 };
8495                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8496                 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
8497                 watchtower
8498         };
8499         let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
8500         watchtower_bob.chain_monitor.block_connected(&Block { header, txdata: vec![] }, CHAN_CONFIRM_DEPTH + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
8501
8502         // Route another payment to generate another update with the previous HTLC still pending
8503         let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 3000000);
8504         nodes[1].node.send_payment_with_route(&route, payment_hash,
8505                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
8506         check_added_monitors!(nodes[1], 1);
8507
8508         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8509         assert_eq!(updates.update_add_htlcs.len(), 1);
8510         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]);
8511         {
8512                 let mut node_0_per_peer_lock;
8513                 let mut node_0_peer_state_lock;
8514                 let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
8515                 if let Ok(update) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
8516                         // Watchtower Alice should already have seen the block and thus reject the update
8517                         assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
8518                         assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8519                         assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8520                 } else { assert!(false); }
8521         }
8522         // Our local monitor is in-sync and hasn't yet processed the timeout
8523         check_added_monitors!(nodes[0], 1);
8524
8525         // Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout
8526         let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
8527         watchtower_bob.chain_monitor.block_connected(&Block { header, txdata: vec![] }, CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
8528
8529         // Watchtower Bob should have broadcast a commitment/HTLC-timeout
8530         let bob_state_y;
8531         {
8532                 let mut txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8533                 assert_eq!(txn.len(), 2);
8534                 bob_state_y = txn[0].clone();
8535                 txn.clear();
8536         };
8537
8538         // We confirm Bob's state Y on Alice; she should broadcast an HTLC-timeout
8539         let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
8540         watchtower_alice.chain_monitor.block_connected(&Block { header, txdata: vec![bob_state_y.clone()] }, CHAN_CONFIRM_DEPTH + 2 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
8541         {
8542                 let htlc_txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8543                 assert_eq!(htlc_txn.len(), 1);
8544                 check_spends!(htlc_txn[0], bob_state_y);
8545         }
8546 }
8547
8548 #[test]
8549 fn test_pre_lockin_no_chan_closed_update() {
8550         // Test that if a peer closes a channel in response to a funding_created message we don't
8551         // generate a channel update (as the channel cannot appear on chain without a funding_signed
8552         // message).
8553         //
8554         // Doing so would imply a channel monitor update before the initial channel monitor
8555         // registration, violating our API guarantees.
8556         //
8557         // Previously, full_stack_target managed to hit this case by opening then closing a channel,
8558         // then opening a second channel with the same funding output as the first (which is not
8559         // rejected because the first channel does not exist in the ChannelManager) and closing it
8560         // before receiving funding_signed.
8561         let chanmon_cfgs = create_chanmon_cfgs(2);
8562         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8563         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8564         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8565
8566         // Create an initial channel
8567         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
8568         let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8569         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
8570         let accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
8571         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
8572
8573         // Move the first channel through the funding flow...
8574         let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
8575
8576         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
8577         check_added_monitors!(nodes[0], 0);
8578
8579         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
8580         let channel_id = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
8581         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
8582         assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
8583         check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true);
8584 }
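
// Sanity-check sketch of the `to_channel_id` derivation used above: per BOLT 2 the
// channel_id is the funding txid with its final two bytes XORed with the big-endian
// funding output index.
#[test]
fn channel_id_derivation_sketch() {
        use bitcoin::hash_types::Txid;
        let funding = OutPoint { txid: Txid::all_zeros(), index: 1 };
        let channel_id = funding.to_channel_id();
        assert_eq!(&channel_id[..30], &[0u8; 30][..]);
        assert_eq!(channel_id[30], 0); // high byte of the funding output index
        assert_eq!(channel_id[31], 1); // low byte of the funding output index
}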
8585
8586 #[test]
8587 fn test_htlc_no_detection() {
8588         // This test is a mutation to underscore the detection logic bug we had
8589         // before #653. The routed HTLC value is above the remaining balance, thus
8590         // inverting the HTLC and `to_remote` outputs. The HTLC will come second and
8591         // wouldn't have been seen by the pre-#653 detection, as we were enumerate()'ing
8592         // over a watched outputs vector (Vec<TxOut>), implicitly relying on output
8593         // ordering for correct filtering of spending children (see the sketch after this test).
8594
8595         let chanmon_cfgs = create_chanmon_cfgs(2);
8596         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8597         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8598         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8599
8600         // Create some initial channels
8601         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8602
8603         send_payment(&nodes[0], &vec!(&nodes[1])[..], 1_000_000);
8604         let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
8605         let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
8606         assert_eq!(local_txn[0].input.len(), 1);
8607         assert_eq!(local_txn[0].output.len(), 3);
8608         check_spends!(local_txn[0], chan_1.3);
8609
8610         // Time out the HTLC on A's chain so that it can generate an HTLC-Timeout tx
8611         let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
8612         connect_block(&nodes[0], &Block { header, txdata: vec![local_txn[0].clone()] });
8613         // We deliberately connect the local tx twice, as this would provoke a failure when running
8614         // this test before the #653 fix.
8615         chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &Block { header, txdata: vec![local_txn[0].clone()] }, nodes[0].best_block_info().1 + 1);
8616         check_closed_broadcast!(nodes[0], true);
8617         check_added_monitors!(nodes[0], 1);
8618         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
8619         connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1);
8620
8621         let htlc_timeout = {
8622                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8623                 assert_eq!(node_txn.len(), 1);
8624                 assert_eq!(node_txn[0].input.len(), 1);
8625                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
8626                 check_spends!(node_txn[0], local_txn[0]);
8627                 node_txn[0].clone()
8628         };
8629
8630         let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
8631         connect_block(&nodes[0], &Block { header: header_201, txdata: vec![htlc_timeout.clone()] });
8632         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
8633         expect_payment_failed!(nodes[0], our_payment_hash, false);
8634 }
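
// A toy sketch (hypothetical data, not LDK types) of the pre-#653 pitfall described above:
// identifying watched outputs by their *position* breaks when a balance change inverts the
// output ordering, whereas matching on the output's script does not.
#[test]
fn output_order_matching_sketch() {
        let before: Vec<&str> = vec!["to_remote_script", "htlc_script"];
        let after_inversion: Vec<&str> = vec!["htlc_script", "to_remote_script"];
        // Position-based lookup now points at the wrong output...
        assert_ne!(before[1], after_inversion[1]);
        // ...while script-based matching still finds the HTLC output.
        assert!(after_inversion.iter().any(|script| *script == "htlc_script"));
}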
8635
8636 fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain_before_fulfill: bool) {
8637         // If we route an HTLC, then learn the HTLC's preimage after the upstream channel has been
8638         // force-closed, we must claim that HTLC on-chain. (Given an HTLC forwarded from Alice --> Bob -->
8639         // Carol, Alice would be the upstream node, and Carol the downstream.)
8640         //
8641         // Steps of the test:
8642         // 1) Alice sends an HTLC to Carol through Bob.
8643         // 2) Carol doesn't settle the HTLC.
8644         // 3) If broadcast_alice is true, Alice force-closes her channel with Bob. Else Bob force-closes.
8645         // Steps 4 and 5 may be reordered depending on go_onchain_before_fulfill.
8646         // 4) Bob sees Alice's commitment on his chain (or vice versa). An offered output is present
8647         //    but can't be claimed yet, as Bob doesn't know the preimage.
8648         // 5) Carol releases the preimage to Bob off-chain.
8649         // 6) Bob claims the offered output on the broadcasted commitment.
8650         let chanmon_cfgs = create_chanmon_cfgs(3);
8651         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
8652         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
8653         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
8654
8655         // Create some initial channels
8656         let chan_ab = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8657         create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 10001);
8658
8659         // Steps (1) and (2):
8660         // Send an HTLC Alice --> Bob --> Carol, but Carol doesn't settle the HTLC back.
8661         let (payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
8662
8663         // Check that Alice's commitment transaction now contains an output for this HTLC.
8664         let alice_txn = get_local_commitment_txn!(nodes[0], chan_ab.2);
8665         check_spends!(alice_txn[0], chan_ab.3);
8666         assert_eq!(alice_txn[0].output.len(), 2);
8667         check_spends!(alice_txn[1], alice_txn[0]); // 2nd transaction is a non-final HTLC-timeout
8668         assert_eq!(alice_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
8669         assert_eq!(alice_txn.len(), 2);
8670
8671         // Steps (3) and (4):
8672         // If `go_onchain_before_fulfill`, broadcast the relevant commitment transaction and check that Bob
8673         // responds by (1) broadcasting a channel update and (2) adding a new ChannelMonitor.
8674         let mut force_closing_node = 0; // Alice force-closes
8675         let mut counterparty_node = 1; // Bob if Alice force-closes
8676
8677         // Bob force-closes
8678         if !broadcast_alice {
8679                 force_closing_node = 1;
8680                 counterparty_node = 0;
8681         }
8682         nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
8683         check_closed_broadcast!(nodes[force_closing_node], true);
8684         check_added_monitors!(nodes[force_closing_node], 1);
8685         check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed);
8686         if go_onchain_before_fulfill {
8687                 let txn_to_broadcast = match broadcast_alice {
8688                         true => alice_txn.clone(),
8689                         false => get_local_commitment_txn!(nodes[1], chan_ab.2)
8690                 };
8691                 let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
8692                 connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]});
8693                 if broadcast_alice {
8694                         check_closed_broadcast!(nodes[1], true);
8695                         check_added_monitors!(nodes[1], 1);
8696                         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
8697                 }
8698         }
8699
8700         // Step (5):
8701         // Carol then claims the funds and sends an update_fulfill message to Bob, and they go through the
8702         // process of removing the HTLC from their commitment transactions.
8703         nodes[2].node.claim_funds(payment_preimage);
8704         check_added_monitors!(nodes[2], 1);
8705         expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
8706
8707         let carol_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
8708         assert!(carol_updates.update_add_htlcs.is_empty());
8709         assert!(carol_updates.update_fail_htlcs.is_empty());
8710         assert!(carol_updates.update_fail_malformed_htlcs.is_empty());
8711         assert!(carol_updates.update_fee.is_none());
8712         assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
8713
8714         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
8715         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if go_onchain_before_fulfill || force_closing_node == 1 { None } else { Some(1000) }, false, false);
8716         // If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
8717         if !go_onchain_before_fulfill && broadcast_alice {
8718                 let events = nodes[1].node.get_and_clear_pending_msg_events();
8719                 assert_eq!(events.len(), 1);
8720                 match events[0] {
8721                         MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
8722                                 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8723                         },
8724                         _ => panic!("Unexpected event"),
8725                 };
8726         }
8727         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &carol_updates.commitment_signed);
8728         // One monitor update for the preimage to update the Bob<->Alice channel, one monitor update
8729         // for Carol<->Bob's updated commitment transaction info.
8730         check_added_monitors!(nodes[1], 2);
8731
8732         let events = nodes[1].node.get_and_clear_pending_msg_events();
8733         assert_eq!(events.len(), 2);
8734         let bob_revocation = match events[0] {
8735                 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
8736                         assert_eq!(*node_id, nodes[2].node.get_our_node_id());
8737                         (*msg).clone()
8738                 },
8739                 _ => panic!("Unexpected event"),
8740         };
8741         let bob_updates = match events[1] {
8742                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
8743                         assert_eq!(*node_id, nodes[2].node.get_our_node_id());
8744                         (*updates).clone()
8745                 },
8746                 _ => panic!("Unexpected event"),
8747         };
8748
8749         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revocation);
8750         check_added_monitors!(nodes[2], 1);
8751         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_updates.commitment_signed);
8752         check_added_monitors!(nodes[2], 1);
8753
8754         let events = nodes[2].node.get_and_clear_pending_msg_events();
8755         assert_eq!(events.len(), 1);
8756         let carol_revocation = match events[0] {
8757                 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
8758                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
8759                         (*msg).clone()
8760                 },
8761                 _ => panic!("Unexpected event"),
8762         };
8763         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &carol_revocation);
8764         check_added_monitors!(nodes[1], 1);
8765
8766         // If this test requires the force-closed channel to not be on-chain until after the fulfill,
8767         // here's where we put said channel's commitment tx on-chain.
8768         let mut txn_to_broadcast = alice_txn.clone();
8769         if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); }
8770         if !go_onchain_before_fulfill {
8771                 let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
8772                 connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]});
8773                 // If Bob was the one to force-close, he will have already passed these checks earlier.
8774                 if broadcast_alice {
8775                         check_closed_broadcast!(nodes[1], true);
8776                         check_added_monitors!(nodes[1], 1);
8777                         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
8778                 }
8779                 let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
8780                 if broadcast_alice {
8781                         assert_eq!(bob_txn.len(), 1);
8782                         check_spends!(bob_txn[0], txn_to_broadcast[0]);
8783                 } else {
8784                         assert_eq!(bob_txn.len(), 2);
8785                         check_spends!(bob_txn[0], chan_ab.3);
8786                 }
8787         }
8788
8789         // Step (6):
8790         // Finally, check that Bob broadcasted a preimage-claiming transaction for the HTLC output on the
8791         // broadcasted commitment transaction.
8792         {
8793                 let script_weight = match broadcast_alice {
8794                         true => OFFERED_HTLC_SCRIPT_WEIGHT,
8795                         false => ACCEPTED_HTLC_SCRIPT_WEIGHT
8796                 };
8797                 // If Alice force-closed, Bob only broadcasts a HTLC-output-claiming transaction. Otherwise,
8798                 // Bob force-closed and broadcasts the commitment transaction along with a
8799                 // HTLC-output-claiming transaction.
8800                 let bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
8801                 if broadcast_alice {
8802                         assert_eq!(bob_txn.len(), 1);
8803                         check_spends!(bob_txn[0], txn_to_broadcast[0]);
8804                         assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
8805                 } else {
8806                         assert_eq!(bob_txn.len(), 2);
8807                         check_spends!(bob_txn[1], txn_to_broadcast[0]);
8808                         assert_eq!(bob_txn[1].input[0].witness.last().unwrap().len(), script_weight);
8809                 }
8810         }
8811 }
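
// Sketch relating the two weight constants matched on in step (6) above: if Alice's
// commitment confirmed, Bob spends an output *offered* to him; on his own commitment it
// is an *accepted* (received) HTLC output, whose script also carries the CLTV-encumbered
// timeout branch and is therefore slightly larger (133 vs 139 weight units for
// non-anchor channels at the time of writing).
#[test]
fn htlc_claim_script_weight_sketch() {
        assert!(ACCEPTED_HTLC_SCRIPT_WEIGHT > OFFERED_HTLC_SCRIPT_WEIGHT);
}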
8812
8813 #[test]
8814 fn test_onchain_htlc_settlement_after_close() {
8815         do_test_onchain_htlc_settlement_after_close(true, true);
8816         do_test_onchain_htlc_settlement_after_close(false, true); // Technically redundant, but may as well
8817         do_test_onchain_htlc_settlement_after_close(true, false);
8818         do_test_onchain_htlc_settlement_after_close(false, false);
8819 }
8820
8821 #[test]
8822 fn test_duplicate_temporary_channel_id_from_different_peers() {
8823         // Tests that we can accept two different `OpenChannel` requests with the same
8824         // `temporary_channel_id`, as long as they are from different peers.
8825         let chanmon_cfgs = create_chanmon_cfgs(3);
8826         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
8827         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
8828         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
8829
8830         // Create the first channel
8831         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
8832         let mut open_chan_msg_chan_1_0 = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
8833
8834         // Create a second channel
8835         nodes[2].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
8836         let mut open_chan_msg_chan_2_0 = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
8837
8838         // Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same
8839         // `temporary_channel_id` as the `OpenChannel` from nodes[1] to nodes[0].
8840         open_chan_msg_chan_2_0.temporary_channel_id = open_chan_msg_chan_1_0.temporary_channel_id;
8841
8842         // Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same
8843         // `temporary_channel_id` as they are from different peers.
8844         nodes[0].node.handle_open_channel(&nodes[1].node.get_our_node_id(), &open_chan_msg_chan_1_0);
8845         {
8846                 let events = nodes[0].node.get_and_clear_pending_msg_events();
8847                 assert_eq!(events.len(), 1);
8848                 match &events[0] {
8849                         MessageSendEvent::SendAcceptChannel { node_id, msg } => {
8850                                 assert_eq!(node_id, &nodes[1].node.get_our_node_id());
8851                                 assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id);
8852                         },
8853                         _ => panic!("Unexpected event"),
8854                 }
8855         }
8856
8857         nodes[0].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg_chan_2_0);
8858         {
8859                 let events = nodes[0].node.get_and_clear_pending_msg_events();
8860                 assert_eq!(events.len(), 1);
8861                 match &events[0] {
8862                         MessageSendEvent::SendAcceptChannel { node_id, msg } => {
8863                                 assert_eq!(node_id, &nodes[2].node.get_our_node_id());
8864                                 assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id);
8865                         },
8866                         _ => panic!("Unexpected event"),
8867                 }
8868         }
8869 }
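
// A minimal sketch (illustrative only, not LDK's real internals) of why the
// collision above is harmless: channels are keyed per-peer first, so the same
// temporary_channel_id can coexist under two different counterparty keys.
#[allow(dead_code)]
fn temporary_channel_id_scoping_sketch() {
	let secp = Secp256k1::new();
	let peer_a = PublicKey::from_secret_key(&secp, &SecretKey::from_slice(&[1; 32]).unwrap());
	let peer_b = PublicKey::from_secret_key(&secp, &SecretKey::from_slice(&[2; 32]).unwrap());
	let temp_chan_id = [42u8; 32];
	let mut channels_by_peer: HashMap<PublicKey, HashMap<[u8; 32], &'static str>> = HashMap::new();
	channels_by_peer.entry(peer_a).or_insert_with(HashMap::new).insert(temp_chan_id, "channel with peer A");
	channels_by_peer.entry(peer_b).or_insert_with(HashMap::new).insert(temp_chan_id, "channel with peer B");
	assert_eq!(channels_by_peer.len(), 2); // same id under two peers, no conflict
}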
8870
8871 #[test]
8872 fn test_duplicate_chan_id() {
8873         // Test that if a given peer tries to open a channel with the same channel_id as one that is
8874         // already open we reject it and keep the old channel.
8875         //
8876         // Previously, full_stack_target managed to figure out that if you tried to open two channels
8877         // with the same funding output (ie post-funding channel_id), we'd create a monitor update for
8878         // the existing channel when we detect the duplicate new channel, screwing up our monitor
8879         // updating logic for the existing channel.
8880         let chanmon_cfgs = create_chanmon_cfgs(2);
8881         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8882         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8883         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8884
8885         // Create an initial channel
8886         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
8887         let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8888         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
8889         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
8890
8891         // Try to create a second channel with the same temporary_channel_id as the first and check
8892         // that it is rejected.
8893         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
8894         {
8895                 let events = nodes[1].node.get_and_clear_pending_msg_events();
8896                 assert_eq!(events.len(), 1);
8897                 match events[0] {
8898                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
8899                                 // Technically, at this point, nodes[1] would be justified in thinking both the
8900                                 // first (valid) and second (invalid) channels are closed, given they both have
8901                                 // the same non-temporary channel_id. However, currently we do not, so we just
8902                                 // move forward with it.
8903                                 assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
8904                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
8905                         },
8906                         _ => panic!("Unexpected event"),
8907                 }
8908         }
8909
8910         // Move the first channel through the funding flow...
8911         let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
8912
8913         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
8914         check_added_monitors!(nodes[0], 0);
8915
8916         let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
8917         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
8918         {
8919                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
8920                 assert_eq!(added_monitors.len(), 1);
8921                 assert_eq!(added_monitors[0].0, funding_output);
8922                 added_monitors.clear();
8923         }
8924         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
8925
8926         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
8927
8928         let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
8929         let channel_id = funding_outpoint.to_channel_id();
8930
8931         // Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a
8932         // temporary one).
8933
8934         // First try to open a second channel with a temporary channel id equal to the txid-based one.
8935         // Technically this is allowed by the spec, but we don't support it and there's little reason
8936         // to. Still, it shouldn't cause any other issues.
8937         open_chan_msg.temporary_channel_id = channel_id;
8938         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
8939         {
8940                 let events = nodes[1].node.get_and_clear_pending_msg_events();
8941                 assert_eq!(events.len(), 1);
8942                 match events[0] {
8943                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
8944                                 // Technically, at this point, nodes[1] would be justified in thinking both
8945                                 // channels are closed, but currently we do not, so we just move forward with it.
8946                                 assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
8947                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
8948                         },
8949                         _ => panic!("Unexpected event"),
8950                 }
8951         }
8952
8953         // Now try to create a second channel which has a duplicate funding output.
8954         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
8955         let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8956         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_2_msg);
8957         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
8958         create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event
8959
8960         let funding_created = {
8961                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
8962                 let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
8963                 // Once we call `get_outbound_funding_created` the channel has the same channel_id as
8964                 // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
8965                 // try to create another channel. Instead, we drop the channel entirely here (leaving the
8966                 // ChannelManager in a possibly nonsense state).
8967                 let mut as_chan = a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
8968                 let logger = test_utils::TestLogger::new();
8969                 as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap()
8970         };
8971         check_added_monitors!(nodes[0], 0);
8972         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
8973         // At this point we'll look up if the channel_id is present and immediately fail the channel
8974         // without trying to persist the `ChannelMonitor`.
8975         check_added_monitors!(nodes[1], 0);
8976
8977         // ...still, nodes[1] will reject the duplicate channel.
8978         {
8979                 let events = nodes[1].node.get_and_clear_pending_msg_events();
8980                 assert_eq!(events.len(), 1);
8981                 match events[0] {
8982                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
8983                                 // Technically, at this point, nodes[1] would be justified in thinking both
8984                                 // channels are closed, but currently we do not, so we just move forward with it.
8985                                 assert_eq!(msg.channel_id, channel_id);
8986                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
8987                         },
8988                         _ => panic!("Unexpected event"),
8989                 }
8990         }
8991
8992         // Finally, finish creating the original channel and send a payment over it to make sure
8993         // everything is functional.
8994         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
8995         {
8996                 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
8997                 assert_eq!(added_monitors.len(), 1);
8998                 assert_eq!(added_monitors[0].0, funding_output);
8999                 added_monitors.clear();
9000         }
9001         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9002
9003         let events_4 = nodes[0].node.get_and_clear_pending_events();
9004         assert_eq!(events_4.len(), 0);
9005         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
9006         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
9007
9008         let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
9009         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
9010         update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
9011
9012         send_payment(&nodes[0], &[&nodes[1]], 8000000);
9013 }
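
// A sketch of how the funding-derived channel_id used above is constructed (per
// BOLT 2; the real helper is `OutPoint::to_channel_id`): the funding txid with
// its last two bytes XORed with the big-endian funding output index.
#[allow(dead_code)]
fn channel_id_from_funding_sketch(funding_txid_bytes: [u8; 32], funding_output_index: u16) -> [u8; 32] {
	let mut channel_id = funding_txid_bytes;
	channel_id[30] ^= (funding_output_index >> 8) as u8;
	channel_id[31] ^= (funding_output_index & 0xff) as u8;
	channel_id
}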
9014
9015 #[test]
9016 fn test_error_chans_closed() {
9017         // Test that we properly handle error messages, closing appropriate channels.
9018         //
9019         // Prior to #787 we'd allow a peer to make us force-close a channel we had with a different
9020         // peer. The "real" fix for that is to index channels by peer_id; in the meantime we can test
9021         // various edge cases around it to ensure we don't regress.
9022         let chanmon_cfgs = create_chanmon_cfgs(3);
9023         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9024         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9025         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9026
9027         // Create some initial channels
9028         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9029         let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9030         let chan_3 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001);
9031
9032         assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
9033         assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
9034         assert_eq!(nodes[2].node.list_usable_channels().len(), 1);
9035
9036         // Closing a channel from a different peer has no effect
9037         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() });
9038         assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
9039
9040         // Closing one channel doesn't impact others
9041         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
9042         check_added_monitors!(nodes[0], 1);
9043         check_closed_broadcast!(nodes[0], false);
9044         check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) });
9045         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
9046         assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
9047         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
9048         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2);
9049
9050         // A null channel ID should close all channels with that peer
9051         let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9052         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
9053         check_added_monitors!(nodes[0], 2);
9054         check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) });
9055         let events = nodes[0].node.get_and_clear_pending_msg_events();
9056         assert_eq!(events.len(), 2);
9057         match events[0] {
9058                 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
9059                         assert_eq!(msg.contents.flags & 2, 2);
9060                 },
9061                 _ => panic!("Unexpected event"),
9062         }
9063         match events[1] {
9064                 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
9065                         assert_eq!(msg.contents.flags & 2, 2);
9066                 },
9067                 _ => panic!("Unexpected event"),
9068         }
9069         // Note that at this point users of a standard PeerHandler will end up calling
9070         // peer_disconnected.
9071         assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
9072         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
9073
9074         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
9075         assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
9076         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
9077 }
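
// A sketch of the BOLT 1 rule exercised above (hypothetical helper, not an LDK
// API): an `error` whose channel_id is all zeros applies to every channel with
// the sending peer, while any other value only targets the matching channel.
#[allow(dead_code)]
fn error_applies_to_channel(err_channel_id: &[u8; 32], channel_id: &[u8; 32]) -> bool {
	*err_channel_id == [0; 32] || err_channel_id == channel_id
}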
9078
9079 #[test]
9080 fn test_invalid_funding_tx() {
9081         // Test that we properly handle invalid funding transactions sent to us from a peer.
9082         //
9083         // Previously, all other major lightning implementations had failed to properly sanitize
9084         // funding transactions from their counterparties, leading to a multi-implementation critical
9085         // security vulnerability (though we always sanitized properly, we've previously had
9086         // unreleased crashes in the sanitization process).
9087         //
9088         // Further, if the funding transaction is consensus-valid, confirms, and is later spent, we'd
9089         // previously have crashed in `ChannelMonitor` even though we closed the channel as bogus and
9090         // gave up on it. We test this here by generating such a transaction.
9091         let chanmon_cfgs = create_chanmon_cfgs(2);
9092         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9093         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9094         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9095
9096         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 10_000, 42, None).unwrap();
9097         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
9098         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9099
9100         let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
9101
9102         // Create a witness program which can be spent by a 4-empty-stack-elements witness and which is
9103         // 136 bytes long. This matches our "accepted HTLC preimage spend" detection, previously causing
9104         // a panic as we'd try to extract a 32-byte preimage from a witness element without checking
9105         // its length.
9106         let wit_program: Vec<u8> = channelmonitor::deliberately_bogus_accepted_htlc_witness_program();
9107         let wit_program_script: Script = wit_program.into();
9108         for output in tx.output.iter_mut() {
9109                 // Make the confirmed funding transaction have a bogus script_pubkey
9110                 output.script_pubkey = Script::new_v0_p2wsh(&wit_program_script.wscript_hash());
9111         }
9112
9113         nodes[0].node.funding_transaction_generated_unchecked(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone(), 0).unwrap();
9114         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
9115         check_added_monitors!(nodes[1], 1);
9116         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9117
9118         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
9119         check_added_monitors!(nodes[0], 1);
9120         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9121
9122         let events_1 = nodes[0].node.get_and_clear_pending_events();
9123         assert_eq!(events_1.len(), 0);
9124
9125         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
9126         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
9127         nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
9128
9129         let expected_err = "funding tx had wrong script/value or output index";
9130         confirm_transaction_at(&nodes[1], &tx, 1);
9131         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() });
9132         check_added_monitors!(nodes[1], 1);
9133         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
9134         assert_eq!(events_2.len(), 1);
9135         if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] {
9136                 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
9137                 if let msgs::ErrorAction::SendErrorMessage { msg } = action {
9138                         assert_eq!(msg.data, "Channel closed because of an exception: ".to_owned() + expected_err);
9139                 } else { panic!(); }
9140         } else { panic!(); }
9141         assert_eq!(nodes[1].node.list_channels().len(), 0);
9142
9143         // Now confirm a spend of the (bogus) funding transaction. As long as the witness is 5 elements
9144         // long, the ChannelMonitor will try to read 32 bytes from the second-to-last element, panicking
9145         // as it's not 32 bytes long.
9146         let mut spend_tx = Transaction {
9147                 version: 2i32, lock_time: PackedLockTime::ZERO,
9148                 input: tx.output.iter().enumerate().map(|(idx, _)| TxIn {
9149                         previous_output: BitcoinOutPoint {
9150                                 txid: tx.txid(),
9151                                 vout: idx as u32,
9152                         },
9153                         script_sig: Script::new(),
9154                         sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
9155                         witness: Witness::from_vec(channelmonitor::deliberately_bogus_accepted_htlc_witness())
9156                 }).collect(),
9157                 output: vec![TxOut {
9158                         value: 1000,
9159                         script_pubkey: Script::new(),
9160                 }]
9161         };
9162         check_spends!(spend_tx, tx);
9163         mine_transaction(&nodes[1], &spend_tx);
9164 }
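
// A minimal sketch (hypothetical helper) of the sanity check which closed the
// channel above: the confirmed funding transaction must carry, at the advertised
// output index, exactly the negotiated script and value.
#[allow(dead_code)]
fn funding_output_matches(tx: &Transaction, index: usize, expected_script: &Script, expected_value_sats: u64) -> bool {
	tx.output.get(index).map_or(false, |out|
		out.script_pubkey == *expected_script && out.value == expected_value_sats)
}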
9165
9166 fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock: bool) {
9167         // In the first version of the chain::Confirm interface, after a refactor was made to not
9168         // broadcast CSV-locked transactions until their CSV lock is up, we wouldn't reliably broadcast
9169         // transactions after a `transactions_confirmed` call. Specifically, if the chain, provided via
9170         // `best_block_updated`, is at height N, and a transaction output which we wish to spend at
9171         // height N-1 (due to a CSV to height N-1) is provided at height N, we would not broadcast the
9172         // spending transaction until height N+1 (or greater). This was due to the way
9173         // `ChannelMonitor::transactions_confirmed` worked, only checking if we should broadcast a
9174         // spending transaction at the height the input transaction was confirmed at, not whether we
9175         // should broadcast a spending transaction at the current height.
9176         // A second, similar, issue involved failing HTLCs backwards - because we only provided the
9177         // height at which transactions were confirmed to `OnchainTx::update_claims_view`, it wasn't
9178         // aware that the anti-reorg-delay had, in fact, already expired, waiting to fail-backwards
9179         // until we learned about an additional block.
9180         //
9181         // As an additional check, if `test_height_before_timelock` is set, we instead test that we
9182         // aren't broadcasting transactions too early (ie not broadcasting them at all).
9183         let chanmon_cfgs = create_chanmon_cfgs(3);
9184         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9185         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9186         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9187         *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
9188
9189         create_announced_chan_between_nodes(&nodes, 0, 1);
9190         let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
9191         let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
9192         nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
9193         nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
9194
9195         nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
9196         check_closed_broadcast!(nodes[1], true);
9197         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
9198         check_added_monitors!(nodes[1], 1);
9199         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
9200         assert_eq!(node_txn.len(), 1);
9201
9202         let conf_height = nodes[1].best_block_info().1;
9203         if !test_height_before_timelock {
9204                 connect_blocks(&nodes[1], 24 * 6);
9205         }
9206         nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
9207                 &nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height);
9208         if test_height_before_timelock {
9209                 // If we confirmed the close transaction, but timelocks have not yet expired, we should not
9210                 // generate any events or broadcast any transactions
9211                 assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
9212                 assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
9213         } else {
9214                 // We should broadcast an HTLC transaction spending our funding transaction first
9215                 let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
9216                 assert_eq!(spending_txn.len(), 2);
9217                 assert_eq!(spending_txn[0], node_txn[0]);
9218                 check_spends!(spending_txn[1], node_txn[0]);
9219                 // We should also generate a SpendableOutputs event with the to_self output (as its
9220                 // timelock is up).
9221                 let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
9222                 assert_eq!(descriptor_spend_txn.len(), 1);
9223
9224                 // If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we
9225                 // should immediately fail-backwards the HTLC to the previous hop, without waiting for an
9226                 // additional block built on top of the current chain.
9227                 nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
9228                         &nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
9229                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
9230                 check_added_monitors!(nodes[1], 1);
9231
9232                 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9233                 assert!(updates.update_add_htlcs.is_empty());
9234                 assert!(updates.update_fulfill_htlcs.is_empty());
9235                 assert_eq!(updates.update_fail_htlcs.len(), 1);
9236                 assert!(updates.update_fail_malformed_htlcs.is_empty());
9237                 assert!(updates.update_fee.is_none());
9238                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
9239                 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
9240                 expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_announce.contents.short_channel_id, true);
9241         }
9242 }
9243
9244 #[test]
9245 fn test_tx_confirmed_skipping_blocks_immediate_broadcast() {
9246         do_test_tx_confirmed_skipping_blocks_immediate_broadcast(false);
9247         do_test_tx_confirmed_skipping_blocks_immediate_broadcast(true);
9248 }
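
// A sketch of the `chain::Confirm` call pattern the test above depends on
// (heights illustrative): `transactions_confirmed` may report a confirmation at
// a height below the current tip, and any resulting broadcasts or backwards
// failures must be evaluated against the tip, not the reported height.
#[allow(dead_code)]
fn confirm_at_old_height<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, tx: &Transaction, conf_height: u32) {
	node.chain_monitor.chain_monitor.transactions_confirmed(
		&node.get_block_header(conf_height), &[(0, tx)], conf_height);
}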
9249
9250 fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
9251         let chanmon_cfgs = create_chanmon_cfgs(2);
9252         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9253         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9254         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9255
9256         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9257
9258         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
9259                 .with_features(nodes[1].node.invoice_features());
9260         let route = get_route!(nodes[0], payment_params, 10_000, TEST_FINAL_CLTV).unwrap();
9261
9262         let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]);
9263
9264         {
9265                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
9266                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
9267                 check_added_monitors!(nodes[0], 1);
9268                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9269                 assert_eq!(events.len(), 1);
9270                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
9271                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9272                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
9273         }
9274         expect_pending_htlcs_forwardable!(nodes[1]);
9275         expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 10_000);
9276
9277         {
9278                 // Note that we use a different PaymentId here to allow us to duplicatively pay
9279                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
9280                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_secret.0)).unwrap();
9281                 check_added_monitors!(nodes[0], 1);
9282                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9283                 assert_eq!(events.len(), 1);
9284                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
9285                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9286                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
9287                 // At this point, nodes[1] would notice it has too much value for the payment. It will
9288                 // assume the second is a privacy attack (no longer particularly relevant
9289                 // post-payment_secrets) and fail back the new HTLC. Previously, it'd also have failed back
9290                 // the first HTLC delivered above.
9291         }
9292
9293         expect_pending_htlcs_forwardable_ignore!(nodes[1]);
9294         nodes[1].node.process_pending_htlc_forwards();
9295
9296         if test_for_second_fail_panic {
9297                 // Now we go fail back the first HTLC from the user end.
9298                 nodes[1].node.fail_htlc_backwards(&our_payment_hash);
9299
9300                 let expected_destinations = vec![
9301                         HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
9302                         HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
9303                 ];
9304                 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations);
9305                 nodes[1].node.process_pending_htlc_forwards();
9306
9307                 check_added_monitors!(nodes[1], 1);
9308                 let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9309                 assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2);
9310
9311                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9312                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]);
9313                 commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
9314
9315                 let failure_events = nodes[0].node.get_and_clear_pending_events();
9316                 assert_eq!(failure_events.len(), 4);
9317                 if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); }
9318                 if let Event::PaymentFailed { .. } = failure_events[1] {} else { panic!(); }
9319                 if let Event::PaymentPathFailed { .. } = failure_events[2] {} else { panic!(); }
9320                 if let Event::PaymentFailed { .. } = failure_events[3] {} else { panic!(); }
9321         } else {
9322                 // Let the second HTLC fail and claim the first
9323                 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
9324                 nodes[1].node.process_pending_htlc_forwards();
9325
9326                 check_added_monitors!(nodes[1], 1);
9327                 let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9328                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9329                 commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
9330
9331                 expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new());
9332
9333                 claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
9334         }
9335 }
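
// An illustrative note on the PaymentId trick used above (a sketch, not an LDK
// API): pending payments are tracked by PaymentId, so deliberately paying the
// same payment_hash twice requires two distinct ids; any unique 32 bytes works.
#[allow(dead_code)]
fn distinct_payment_ids(payment_hash: PaymentHash, payment_secret: PaymentSecret) -> (PaymentId, PaymentId) {
	(PaymentId(payment_hash.0), PaymentId(payment_secret.0))
}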
9336
9337 #[test]
9338 fn test_dup_htlc_second_fail_panic() {
9339         // Previously, if we received two HTLCs back-to-back, where the second overran the expected
9340         // value for the payment, we'd fail back both HTLCs after generating a `PaymentClaimable` event.
9341         // Then, if the user failed the second payment, they'd hit a "tried to fail an already failed
9342         // HTLC" debug panic. This tests for this behavior, checking that only one HTLC is auto-failed.
9343         do_test_dup_htlc_second_rejected(true);
9344 }
9345
9346 #[test]
9347 fn test_dup_htlc_second_rejected() {
9348         // Test that if we receive a second HTLC for an MPP payment that overruns the payment amount we
9349         // simply reject the second HTLC but are still able to claim the first HTLC.
9350         do_test_dup_htlc_second_rejected(false);
9351 }
9352
9353 #[test]
9354 fn test_inconsistent_mpp_params() {
9355         // Test that if we receive two HTLCs with different payment parameters we fail back the first
9356         // such HTLC and allow the second to stay.
9357         let chanmon_cfgs = create_chanmon_cfgs(4);
9358         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9359         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9360         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9361
9362         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
9363         create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
9364         create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
9365         let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);
9366
9367         let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
9368                 .with_features(nodes[3].node.invoice_features());
9369         let mut route = get_route!(nodes[0], payment_params, 15_000_000, TEST_FINAL_CLTV).unwrap();
9370         assert_eq!(route.paths.len(), 2);
9371         route.paths.sort_by(|path_a, _| {
9372                 // Sort the path so that the path through nodes[1] comes first
9373                 if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
9374                         core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
9375         });
9376
9377         let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]);
9378
9379         let cur_height = nodes[0].best_block_info().1;
9380         let payment_id = PaymentId([42; 32]);
9381
9382         let session_privs = {
9383                 // We create a fake route here so that we start with three pending HTLCs, which we'll
9384                 // ultimately have, just not right away.
9385                 let mut dup_route = route.clone();
9386                 dup_route.paths.push(route.paths[1].clone());
9387                 nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(our_payment_secret), payment_id, &dup_route).unwrap()
9388         };
9389         nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
9390         check_added_monitors!(nodes[0], 1);
9391
9392         {
9393                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9394                 assert_eq!(events.len(), 1);
9395                 pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), false, None);
9396         }
9397         assert!(nodes[3].node.get_and_clear_pending_events().is_empty());
9398
9399         nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash, &Some(our_payment_secret), 14_000_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
9400         check_added_monitors!(nodes[0], 1);
9401
9402         {
9403                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9404                 assert_eq!(events.len(), 1);
9405                 let payment_event = SendEvent::from_event(events.pop().unwrap());
9406
9407                 nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9408                 commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false);
9409
9410                 expect_pending_htlcs_forwardable!(nodes[2]);
9411                 check_added_monitors!(nodes[2], 1);
9412
9413                 let mut events = nodes[2].node.get_and_clear_pending_msg_events();
9414                 assert_eq!(events.len(), 1);
9415                 let payment_event = SendEvent::from_event(events.pop().unwrap());
9416
9417                 nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
9418                 check_added_monitors!(nodes[3], 0);
9419                 commitment_signed_dance!(nodes[3], nodes[2], payment_event.commitment_msg, true, true);
9420
9421                 // At this point, nodes[3] should notice the two HTLCs don't contain the same total payment
9422                 // amount. It will assume the second is a privacy attack (no longer particularly relevant
9423                 // post-payment_secrets) and fail back the new HTLC.
9424         }
9425         expect_pending_htlcs_forwardable_ignore!(nodes[3]);
9426         nodes[3].node.process_pending_htlc_forwards();
9427         expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
9428         nodes[3].node.process_pending_htlc_forwards();
9429
9430         check_added_monitors!(nodes[3], 1);
9431
9432         let fail_updates_1 = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
9433         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9434         commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
9435
9436         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]);
9437         check_added_monitors!(nodes[2], 1);
9438
9439         let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
9440         nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_updates_2.update_fail_htlcs[0]);
9441         commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false);
9442
9443         expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
9444
9445         nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None, session_privs[2]).unwrap();
9446         check_added_monitors!(nodes[0], 1);
9447
9448         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9449         assert_eq!(events.len(), 1);
9450         pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);
9451
9452         do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage);
9453         let events = nodes[0].node.get_and_clear_pending_events();
9454         assert_eq!(events.len(), 3);
9455         match events[0] {
9456                 Event::PaymentSent { payment_hash, .. } => { // The payment was abandoned earlier, so the fee paid will be None
9457                         assert_eq!(payment_hash, our_payment_hash);
9458                 },
9459                 _ => panic!("Unexpected event")
9460         }
9461         match events[1] {
9462                 Event::PaymentPathSuccessful { payment_hash, .. } => {
9463                         assert_eq!(payment_hash.unwrap(), our_payment_hash);
9464                 },
9465                 _ => panic!("Unexpected event")
9466         }
9467         match events[2] {
9468                 Event::PaymentPathSuccessful { payment_hash, .. } => {
9469                         assert_eq!(payment_hash.unwrap(), our_payment_hash);
9470                 },
9471                 _ => panic!("Unexpected event")
9472         }
9473 }
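
// A sketch of the consistency rule nodes[3] enforces above (illustrative): every
// HTLC in an MPP set must commit to the same total payment amount, otherwise the
// offending HTLC is failed back.
#[allow(dead_code)]
fn mpp_totals_consistent(part_total_msats: &[u64]) -> bool {
	part_total_msats.windows(2).all(|w| w[0] == w[1])
}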
9474
9475 #[test]
9476 fn test_keysend_payments_to_public_node() {
9477         let chanmon_cfgs = create_chanmon_cfgs(2);
9478         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9479         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9480         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9481
9482         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9483         let network_graph = nodes[0].network_graph.clone();
9484         let payer_pubkey = nodes[0].node.get_our_node_id();
9485         let payee_pubkey = nodes[1].node.get_our_node_id();
9486         let route_params = RouteParameters {
9487                 payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
9488                 final_value_msat: 10000,
9489         };
9490         let scorer = test_utils::TestScorer::new();
9491         let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
9492         let route = find_route(&payer_pubkey, &route_params, &network_graph, None, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
9493
9494         let test_preimage = PaymentPreimage([42; 32]);
9495         let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage),
9496                 RecipientOnionFields::spontaneous_empty(), PaymentId(test_preimage.0)).unwrap();
9497         check_added_monitors!(nodes[0], 1);
9498         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9499         assert_eq!(events.len(), 1);
9500         let event = events.pop().unwrap();
9501         let path = vec![&nodes[1]];
9502         pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage));
9503         claim_payment(&nodes[0], &path, test_preimage);
9504 }
9505
9506 #[test]
9507 fn test_keysend_payments_to_private_node() {
9508         let chanmon_cfgs = create_chanmon_cfgs(2);
9509         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9510         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9511         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9512
9513         let payer_pubkey = nodes[0].node.get_our_node_id();
9514         let payee_pubkey = nodes[1].node.get_our_node_id();
9515
9516         let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
9517         let route_params = RouteParameters {
9518                 payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
9519                 final_value_msat: 10000,
9520         };
9521         let network_graph = nodes[0].network_graph.clone();
9522         let first_hops = nodes[0].node.list_usable_channels();
9523         let scorer = test_utils::TestScorer::new();
9524         let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
9525         let route = find_route(
9526                 &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
9527                 nodes[0].logger, &scorer, &random_seed_bytes
9528         ).unwrap();
9529
9530         let test_preimage = PaymentPreimage([42; 32]);
9531         let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage),
9532                 RecipientOnionFields::spontaneous_empty(), PaymentId(test_preimage.0)).unwrap();
9533         check_added_monitors!(nodes[0], 1);
9534         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9535         assert_eq!(events.len(), 1);
9536         let event = events.pop().unwrap();
9537         let path = vec![&nodes[1]];
9538         pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage));
9539         claim_payment(&nodes[0], &path, test_preimage);
9540 }
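
// A sketch of the keysend invariant both tests above rely on: the sender picks
// the preimage, the payment hash is simply its SHA256, and the preimage rides
// along in the onion so the recipient can claim without an invoice.
#[allow(dead_code)]
fn keysend_payment_hash(preimage: PaymentPreimage) -> PaymentHash {
	use bitcoin::hashes::sha256::Hash as Sha256;
	PaymentHash(Sha256::hash(&preimage.0).into_inner())
}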
9541
9542 #[test]
9543 fn test_double_partial_claim() {
9544         // Test what happens if a node receives a payment and generates a PaymentClaimable event, but
9545         // the HTLCs time out and the sender resends only some of the MPP parts before the user processes
9546         // the PaymentClaimable event, ensuring we don't inadvertently claim only part of the full payment
9547         // amount.
9548         let chanmon_cfgs = create_chanmon_cfgs(4);
9549         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9550         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9551         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9552
9553         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
9554         create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
9555         create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
9556         create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);
9557
9558         let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000);
9559         assert_eq!(route.paths.len(), 2);
9560         route.paths.sort_by(|path_a, _| {
9561                 // Sort the path so that the path through nodes[1] comes first
9562                 if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
9563                         core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
9564         });
9565
9566         send_along_route_with_secret(&nodes[0], route.clone(), &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 15_000_000, payment_hash, payment_secret);
9567         // nodes[3] has now received a PaymentClaimable event...which it will take some (exorbitant)
9568         // amount of time to respond to.
9569
9570         // Connect some blocks to time out the payment
9571         connect_blocks(&nodes[3], TEST_FINAL_CLTV);
9572         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later
9573
9574         let failed_destinations = vec![
9575                 HTLCDestination::FailedPayment { payment_hash },
9576                 HTLCDestination::FailedPayment { payment_hash },
9577         ];
9578         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations);
9579
9580         pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);
9581
9582         // nodes[0] now retries one of the two paths...
9583         nodes[0].node.send_payment_with_route(&route, payment_hash,
9584                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9585         check_added_monitors!(nodes[0], 2);
9586
9587         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9588         assert_eq!(events.len(), 2);
9589         let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
9590         pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, false, None);
9591
9592         // At this point nodes[3] has received one half of the payment, and the user goes to handle
9593         // that PaymentClaimable event they got hours ago and never handled...we should refuse to claim.
9594         nodes[3].node.claim_funds(payment_preimage);
9595         check_added_monitors!(nodes[3], 0);
9596         assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
9597 }
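
// A sketch of the invariant the test above guards (illustrative): a claim may
// only proceed once the HTLCs currently held sum to the full payment amount,
// otherwise the node would irrevocably release the preimage for partial value.
#[allow(dead_code)]
fn mpp_claimable(held_htlc_msats: &[u64], total_msat: u64) -> bool {
	held_htlc_msats.iter().sum::<u64>() >= total_msat
}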
9598
9599 /// The possible events which may trigger a `max_dust_htlc_exposure` breach
9600 #[derive(Clone, Copy, PartialEq)]
9601 enum ExposureEvent {
9602         /// Breach occurs at HTLC forwarding (see `send_htlc`)
9603         AtHTLCForward,
9604         /// Breach occurs at HTLC reception (see `update_add_htlc`)
9605         AtHTLCReception,
9606         /// Breach occurs at outbound update_fee (see `send_update_fee`)
9607         AtUpdateFeeOutbound,
9608 }
9609
9610 fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool) {
9611         // Test that we properly reject dust HTLCs violating our `max_dust_htlc_exposure_msat`
9612         // policy.
9613         //
9614         // At HTLC forward (`send_payment()`), if the sum of the trimmed-to-dust HTLC inbound and
9615         // trimmed-to-dust HTLC outbound balance and this new payment as included on next
9616         // counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll reject the
9617         // update. At HTLC reception (`update_add_htlc()`), if the sum of the trimmed-to-dust HTLC
9618         // inbound and trimmed-to-dust HTLC outbound balance and this new received HTLC as included
9619         // on next counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll fail
9620         // the update. Note, we return a `temporary_channel_failure` (0x1000 | 7), as the channel
9621         // might be available again for HTLC processing once the dust bandwidth has cleared up.
9622
9623         let chanmon_cfgs = create_chanmon_cfgs(2);
9624         let mut config = test_default_channel_config();
9625         config.channel_config.max_dust_htlc_exposure_msat = 5_000_000; // default setting value
9626         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9627         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
9628         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9629
9630         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
9631         let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9632         open_channel.max_htlc_value_in_flight_msat = 50_000_000;
9633         open_channel.max_accepted_htlcs = 60;
9634         if on_holder_tx {
9635                 open_channel.dust_limit_satoshis = 546;
9636         }
9637         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
9638         let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9639         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
9640
9641         let opt_anchors = false;
9642
9643         let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
9644
9645         if on_holder_tx {
9646                 let mut node_0_per_peer_lock;
9647                 let mut node_0_peer_state_lock;
9648                 let mut chan = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id);
9649                 chan.holder_dust_limit_satoshis = 546;
9650         }
9651
9652         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9653         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
9654         check_added_monitors!(nodes[1], 1);
9655         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9656
9657         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
9658         check_added_monitors!(nodes[0], 1);
9659         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9660
9661         let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
9662         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
9663         update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
9664
9665         let dust_buffer_feerate = {
9666                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
9667                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
9668                 let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
9669                 chan.get_dust_buffer_feerate(None) as u64
9670         };
9671         let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
9672         let dust_outbound_htlc_on_holder_tx: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
9673
9674         let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
9675         let dust_inbound_htlc_on_holder_tx: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
9676
9677         let dust_htlc_on_counterparty_tx: u64 = 25;
9678         let dust_htlc_on_counterparty_tx_msat: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx;
9679
9680         if on_holder_tx {
9681                 if dust_outbound_balance {
9682                         // Outbound dust threshold: 2223 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
9683                         // Outbound dust balance: 4372 sats
9684                 // Note, we need the sent payment to be above the outbound dust threshold on the counterparty_tx, i.e. 2132 sats
9685                         for _ in 0..dust_outbound_htlc_on_holder_tx {
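                                // `RecipientOnionFields::secret_only` carries the payment secret in
                                // the recipient onion, per the new send_payment_with_route signature.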
9686                                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_outbound_htlc_on_holder_tx_msat);
9687                                 nodes[0].node.send_payment_with_route(&route, payment_hash,
9688                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9689                         }
9690                 } else {
9691                         // Inbound dust threshold: 2324 sats (`dust_buffer_feerate` * HTLC_SUCCESS_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
9692                         // Inbound dust balance: 4372 sats
9693                 // Note, we need the sent payment to be above the outbound dust threshold on the counterparty_tx, i.e. 2031 sats
9694                         for _ in 0..dust_inbound_htlc_on_holder_tx {
9695                                 route_payment(&nodes[1], &[&nodes[0]], dust_inbound_htlc_on_holder_tx_msat);
9696                         }
9697                 }
9698         } else {
9699                 if dust_outbound_balance {
9700                         // Outbound dust threshold: 2132 sats (`dust_buffer_feerate` * HTLC_SUCCESS_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
9701                         // Outbound dust balance: 5000 sats
9702                         for _ in 0..dust_htlc_on_counterparty_tx {
9703                                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_htlc_on_counterparty_tx_msat);
9704                                 nodes[0].node.send_payment_with_route(&route, payment_hash,
9705                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9706                         }
9707                 } else {
9708                         // Inbound dust threshold: 2031 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
9709                         // Inbound dust balance: 5000 sats
9710                         for _ in 0..dust_htlc_on_counterparty_tx {
9711                                 route_payment(&nodes[1], &[&nodes[0]], dust_htlc_on_counterparty_tx_msat);
9712                         }
9713                 }
9714         }
9715
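        // One more dust HTLC on top of the saturated set would push total exposure to
        // `dust_overflow` msat; the error and log assertions below expect exactly that figure.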
9716         let dust_overflow = dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx + 1);
9717         if exposure_breach_event == ExposureEvent::AtHTLCForward {
9718                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat });
9719                 let config = UserConfig::default();
9720                 // With default dust exposure: 5000 sats
9721                 if on_holder_tx {
9722                         let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * (dust_outbound_htlc_on_holder_tx + 1);
9723                         let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * dust_inbound_htlc_on_holder_tx + dust_outbound_htlc_on_holder_tx_msat;
9724                         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
9725                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
9726                                 ), true, APIError::ChannelUnavailable { ref err },
9727                                 assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_config.max_dust_htlc_exposure_msat)));
9728                 } else {
9729                         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
9730                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
9731                                 ), true, APIError::ChannelUnavailable { ref err },
9732                                 assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_config.max_dust_htlc_exposure_msat)));
9733                 }
9734         } else if exposure_breach_event == ExposureEvent::AtHTLCReception {
9735                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat });
9736                 nodes[1].node.send_payment_with_route(&route, payment_hash,
9737                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9738                 check_added_monitors!(nodes[1], 1);
9739                 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
9740                 assert_eq!(events.len(), 1);
9741                 let payment_event = SendEvent::from_event(events.remove(0));
9742                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
9743                 // With default dust exposure: 5000 sats
9744                 if on_holder_tx {
9745                         // Outbound dust balance: 6399 sats
9746                         let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
9747                         let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
9748                         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_config.max_dust_htlc_exposure_msat), 1);
9749                 } else {
9750                         // Outbound dust balance: 5200 sats
9751                         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_config.max_dust_htlc_exposure_msat), 1);
9752                 }
9753         } else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
9754                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 2_500_000);
9755                 nodes[0].node.send_payment_with_route(&route, payment_hash,
9756                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
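                // This 2_500_000 msat HTLC is non-dust at the current feerate, but once the
                // feerate is multiplied by 10 below it would count as dust, so the queued
                // update_fee would breach the exposure limit and must be held back.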
9757                 {
9758                         let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9759                         *feerate_lock = *feerate_lock * 10;
9760                 }
9761                 nodes[0].node.timer_tick_occurred();
9762                 check_added_monitors!(nodes[0], 1);
9763                 nodes[0].logger.assert_log_contains("lightning::ln::channel", "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure", 1);
9764         }
9765
9766         let _ = nodes[0].node.get_and_clear_pending_msg_events();
9767         let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
9768         added_monitors.clear();
9769 }
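
// A minimal sketch (not upstream code) of the accounting `do_test_max_dust_htlc_exposure` drives:
// sum the msat value of every HTLC below its side's dust threshold and refuse any addition that
// would push that sum past the configured limit.
#[allow(dead_code)] // illustration only
fn would_breach_dust_exposure(dust_htlcs_msat: &[u64], candidate_msat: u64, max_exposure_msat: u64) -> bool {
        let current: u64 = dust_htlcs_msat.iter().sum();
        current + candidate_msat > max_exposure_msat
}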
9770
9771 #[test]
9772 fn test_max_dust_htlc_exposure() {
9773         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true);
9774         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true);
9775         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true);
9776         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false);
9777         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false);
9778         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false);
9779         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true);
9780         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false);
9781         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true);
9782         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false);
9783         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false);
9784         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true);
9785 }
9786
9787 #[test]
9788 fn test_non_final_funding_tx() {
9789         let chanmon_cfgs = create_chanmon_cfgs(2);
9790         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9791         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9792         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9793
9794         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
9795         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9796         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
9797         let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9798         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
9799
9800         let best_height = nodes[0].node.best_block.read().unwrap().height();
9801
9802         let chan_id = *nodes[0].network_chan_count.borrow();
9803         let events = nodes[0].node.get_and_clear_pending_events();
9804         let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::Script::new(), sequence: Sequence(1), witness: Witness::from_vec(vec!(vec!(1))) };
9805         assert_eq!(events.len(), 1);
9806         let mut tx = match events[0] {
9807                 Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
9808                         // Timelock the transaction _beyond_ the best client height + 2.
9809                         Transaction { version: chan_id as i32, lock_time: PackedLockTime(best_height + 3), input: vec![input], output: vec![TxOut {
9810                                 value: *channel_value_satoshis, script_pubkey: output_script.clone(),
9811                         }]}
9812                 },
9813                 _ => panic!("Unexpected event"),
9814         };
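        // As both arms below exercise, the funding flow only tolerates height-based locktimes up
        // to two blocks past the current best height; anything later is treated as non-final.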
9815         // Transaction should fail as it's evaluated as non-final for propagation.
9816         match nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()) {
9817                 Err(APIError::APIMisuseError { err }) => {
9818                         assert_eq!(format!("Funding transaction absolute timelock is non-final"), err);
9819                 },
9820                 _ => panic!()
9821         }
9822
9823         // However, the transaction should be accepted if it's within a +2 headroom of the best block.
9824         tx.lock_time = PackedLockTime(tx.lock_time.0 - 1);
9825         assert!(nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
9826         get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9827 }
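
// A hedged sketch (not upstream code) of the height-locktime rule `test_non_final_funding_tx`
// exercises, assuming height-based locktimes only: best_height + 3 is rejected while
// best_height + 2 is accepted.
#[allow(dead_code)] // illustration only
fn funding_locktime_acceptable(best_height: u32, lock_time_height: u32) -> bool {
        lock_time_height <= best_height + 2
}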
9828
9829 #[test]
9830 fn accept_busted_but_better_fee() {
9831         // If a peer sends us a fee update that is too low, but higher than our previous channel
9832         // feerate, we should accept it. In the future we may want to consider closing the channel
9833         // later, but for now we only accept the update.
9834         let mut chanmon_cfgs = create_chanmon_cfgs(2);
9835         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9836         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9837         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9838
9839         create_chan_between_nodes(&nodes[0], &nodes[1]);
9840
9841         // Set nodes[1] to expect 5,000 sat/kW.
9842         {
9843                 let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
9844                 *feerate_lock = 5000;
9845         }
9846
9847         // If nodes[0] increases their feerate, even if it's not enough, nodes[1] should accept it.
9848         {
9849                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9850                 *feerate_lock = 1000;
9851         }
9852         nodes[0].node.timer_tick_occurred();
9853         check_added_monitors!(nodes[0], 1);
9854
9855         let events = nodes[0].node.get_and_clear_pending_msg_events();
9856         assert_eq!(events.len(), 1);
9857         match events[0] {
9858                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
9859                         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
9860                         commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
9861                 },
9862                 _ => panic!("Unexpected event"),
9863         };
9864
9865         // If nodes[0] increases their feerate further, even if it's not enough, nodes[1] should
9866         // accept it.
9867         {
9868                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9869                 *feerate_lock = 2000;
9870         }
9871         nodes[0].node.timer_tick_occurred();
9872         check_added_monitors!(nodes[0], 1);
9873
9874         let events = nodes[0].node.get_and_clear_pending_msg_events();
9875         assert_eq!(events.len(), 1);
9876         match events[0] {
9877                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
9878                         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
9879                         commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
9880                 },
9881                 _ => panic!("Unexpected event"),
9882         };
9883
9884         // However, if nodes[0] decreases their feerate, nodes[1] should reject it and close the
9885         // channel.
9886         {
9887                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9888                 *feerate_lock = 1000;
9889         }
9890         nodes[0].node.timer_tick_occurred();
9891         check_added_monitors!(nodes[0], 1);
9892
9893         let events = nodes[0].node.get_and_clear_pending_msg_events();
9894         assert_eq!(events.len(), 1);
9895         match events[0] {
9896                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
9897                         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
9898                         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError {
9899                                 err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000 (- 250)".to_owned() });
9900                         check_closed_broadcast!(nodes[1], true);
9901                         check_added_monitors!(nodes[1], 1);
9902                 },
9903                 _ => panic!("Unexpected event"),
9904         };
9905 }
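
// A minimal sketch (not upstream code) of the rule `accept_busted_but_better_fee` exercises: a
// too-low feerate update is tolerated while it moves toward (or past) our own estimate, and
// rejected once it moves away from it.
#[allow(dead_code)] // illustration only
fn tolerate_update_fee(current_feerate: u32, proposed: u32, our_lower_limit: u32) -> bool {
        proposed >= our_lower_limit || proposed >= current_feerate
}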
9906
9907 fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash: bool) {
9908         let mut chanmon_cfgs = create_chanmon_cfgs(2);
9909         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9910         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9911         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9912         let min_final_cltv_expiry_delta = 120;
9913         let final_cltv_expiry_delta = if valid_delta { min_final_cltv_expiry_delta + 2 } else {
9914                 min_final_cltv_expiry_delta - 2 };
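        // The recipient enforces its `min_final_cltv_expiry_delta` at claim time, so a route whose
        // final delta comes up two blocks short should see the HTLC failed back rather than claimed.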
9915         let recv_value = 100_000;
9916
9917         create_chan_between_nodes(&nodes[0], &nodes[1]);
9918
9919         let payment_parameters = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), final_cltv_expiry_delta as u32);
9920         let (payment_hash, payment_preimage, payment_secret) = if use_user_hash {
9921                 let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1],
9922                         Some(recv_value), Some(min_final_cltv_expiry_delta));
9923                 (payment_hash, payment_preimage, payment_secret)
9924         } else {
9925                 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(recv_value), 7200, Some(min_final_cltv_expiry_delta)).unwrap();
9926                 (payment_hash, nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap(), payment_secret)
9927         };
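        // Both inbound-payment flows are covered: a caller-provided preimage/hash via
        // `get_payment_preimage_hash!`, and an LDK-generated preimage recovered with
        // `get_payment_preimage` after `create_inbound_payment`.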
9928         let route = get_route!(nodes[0], payment_parameters, recv_value, final_cltv_expiry_delta as u32).unwrap();
9929         nodes[0].node.send_payment_with_route(&route, payment_hash,
9930                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9931         check_added_monitors!(nodes[0], 1);
9932         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9933         assert_eq!(events.len(), 1);
9934         let mut payment_event = SendEvent::from_event(events.pop().unwrap());
9935         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9936         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
9937         expect_pending_htlcs_forwardable!(nodes[1]);
9938
9939         if valid_delta {
9940                 expect_payment_claimable!(nodes[1], payment_hash, payment_secret, recv_value, if use_user_hash {
9941                         None } else { Some(payment_preimage) }, nodes[1].node.get_our_node_id());
9942
9943                 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
9944         } else {
9945                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
9946
9947                 check_added_monitors!(nodes[1], 1);
9948
9949                 let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9950                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates.update_fail_htlcs[0]);
9951                 commitment_signed_dance!(nodes[0], nodes[1], fail_updates.commitment_signed, false, true);
9952
9953                 expect_payment_failed!(nodes[0], payment_hash, true);
9954         }
9955 }
9956
9957 #[test]
9958 fn test_payment_with_custom_min_cltv_expiry_delta() {
9959         do_payment_with_custom_min_final_cltv_expiry(false, false);
9960         do_payment_with_custom_min_final_cltv_expiry(false, true);
9961         do_payment_with_custom_min_final_cltv_expiry(true, false);
9962         do_payment_with_custom_min_final_cltv_expiry(true, true);
9963 }