// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Tests that stand up a network of ChannelManagers, create channels, send payments/messages
//! between them, and often check that the resulting ChannelMonitors are able to claim outputs
//! on-chain.

use crate::chain;
use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
use crate::chain::chaininterface::LowerBoundedFeeEstimator;
use crate::chain::channelmonitor;
use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use crate::chain::transaction::OutPoint;
use crate::sign::{EcdsaChannelSigner, EntropySource, SignerProvider};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
use crate::ln::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
use crate::ln::{chan_utils, onion_utils};
use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
use crate::routing::gossip::{NetworkGraph, NetworkUpdate};
use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route, RouteParameters};
use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use crate::util::test_channel_signer::TestChannelSigner;
use crate::util::test_utils::{self, WatchtowerPersister};
use crate::util::errors::APIError;
use crate::util::ser::{Writeable, ReadableArgs};
use crate::util::string::UntrustedString;
use crate::util::config::{UserConfig, MaxDustHTLCExposure};

use bitcoin::hash_types::BlockHash;
use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::blockdata::script::{Builder, ScriptBuf};
use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::network::constants::Network;
use bitcoin::{Sequence, Transaction, TxIn, TxOut, Witness};
use bitcoin::OutPoint as BitcoinOutPoint;

use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::{PublicKey, SecretKey};

use regex;

use crate::io;
use crate::prelude::*;
use alloc::collections::BTreeSet;
use core::default::Default;
use core::iter::repeat;
use bitcoin::hashes::Hash;
use crate::sync::{Arc, Mutex, RwLock};

use crate::ln::functional_test_utils::*;
use crate::ln::chan_utils::CommitmentTransaction;

use super::channel::UNFUNDED_CHANNEL_AGE_LIMIT_TICKS;

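// A minimal sketch of the scaffolding nearly every test below shares: build the chanmon and node
// configs, instantiate the nodes, open an announced channel, and route a payment. The concrete
// values here (2 nodes, an 8_000_000 msat payment) are illustrative only; this helper is a
// reading aid and is not exercised by any test in this file.
#[allow(dead_code)]
fn example_two_node_scaffolding() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);
	send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
}
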
#[test]
fn test_insane_channel_opens() {
	// Stand up a network of 2 nodes
	use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS;
	let mut cfg = UserConfig::default();
	cfg.channel_handshake_limits.max_funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1;
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(cfg)]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Instantiate channel parameters where we push the maximum msats given our
	// funding satoshis
	let channel_value_sat = 31337; // same as funding satoshis
	let channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg);
	let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
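	// Note the `* 1000` above: push_msat is denominated in millisatoshis, while the channel value
	// and reserve are in satoshis.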

	// Have node0 initiate a channel to node1 with aforementioned parameters
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_sat, push_msat, 42, None, None).unwrap();

	// Extract the channel open message from node0 to node1
	let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	// Test helper that asserts we get the correct error string given a mutator
	// that supposedly makes the channel open message insane
	let insane_open_helper = |expected_error_str: &str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &message_mutator(open_channel_message.clone()));
		let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(msg_events.len(), 1);
		let expected_regex = regex::Regex::new(expected_error_str).unwrap();
		if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
			match action {
				&ErrorAction::SendErrorMessage { .. } => {
					nodes[1].logger.assert_log_regex("lightning::ln::channelmanager", expected_regex, 1);
				},
				_ => panic!("unexpected event!"),
			}
		} else { panic!("expected a HandleError event"); }
	};

	use crate::ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;

	// Test all mutations that would make the channel open message insane
	insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
	insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });

	insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.funding_satoshis + 1; msg });

	insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });

	insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1; msg });

	insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });

	insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });

	insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.max_accepted_htlcs = 0; msg });

	insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.max_accepted_htlcs = 484; msg });
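	// (483 is the protocol-wide ceiling BOLT #2 places on max_accepted_htlcs.)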
}

#[test]
fn test_funding_exceeds_no_wumbo_limit() {
	// Test that if a peer does not support wumbo channels, we'll refuse to open a wumbo channel to
	// them.
	use crate::ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO;
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	*node_cfgs[1].override_init_features.borrow_mut() = Some(channelmanager::provided_init_features(&test_default_channel_config()).clear_wumbo());
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	match nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None, None) {
		Err(APIError::APIMisuseError { err }) => {
			assert_eq!(format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1), err);
		},
		_ => panic!()
	}
}

fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
	// A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure,
	// but only for them. Because some LSPs do it with some level of trust in their clients (for a
	// substantial UX improvement), we explicitly allow it. Since it's unlikely to come up in
	// normal testing, we test it explicitly here.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let default_config = UserConfig::default();

	// Have node0 initiate a channel to node1 with aforementioned parameters
	let mut push_amt = 100_000_000;
	let feerate_per_kw = 253;
	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
	push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(&channel_type_features) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
	push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
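	// push_amt is now (in msat) the full channel value, less the commitment-tx fee at
	// feerate_per_kw budgeting for the base weight plus four HTLCs (rounded down to whole
	// satoshis), less the default holder-selected reserve for a 100k-sat channel.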

	let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None, None).unwrap();
	let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	if !send_from_initiator {
		open_channel_message.channel_reserve_satoshis = 0;
		open_channel_message.max_htlc_value_in_flight_msat = 100_000_000;
	}
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);

	// Extract the channel accept message from node1 to node0
	let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
	if send_from_initiator {
		accept_channel_message.channel_reserve_satoshis = 0;
		accept_channel_message.max_htlc_value_in_flight_msat = 100_000_000;
	}
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
	{
		let sender_node = if send_from_initiator { &nodes[1] } else { &nodes[0] };
		let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
		let mut sender_node_per_peer_lock;
		let mut sender_node_peer_state_lock;

		let channel_phase = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
		match channel_phase {
			ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
				let chan_context = channel_phase.context_mut();
				chan_context.holder_selected_channel_reserve_satoshis = 0;
				chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000;
			},
			ChannelPhase::Funded(_) => panic!("channel should not yet be funded"),
		}
	}

	let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id);
	let funding_msgs = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx);
	create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_msgs.0);

	// nodes[0] should now be able to send the full balance to nodes[1], violating nodes[1]'s
	// security model if it ever tries to send funds back to nodes[0] (but that's not our problem).
	if send_from_initiator {
		send_payment(&nodes[0], &[&nodes[1]], 100_000_000
			// Note that for outbound channels we have to consider the commitment tx fee and the
			// "fee spike buffer", which is currently a multiple of the total commitment tx fee as
			// well as an additional HTLC.
			- FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, &channel_type_features));
	} else {
		send_payment(&nodes[1], &[&nodes[0]], push_amt);
	}
}

#[test]
fn test_counterparty_no_reserve() {
	do_test_counterparty_no_reserve(true);
	do_test_counterparty_no_reserve(false);
}

#[test]
fn test_async_inbound_update_fee() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &[&nodes[1]], 8000000);

	// A                                        B
	// update_fee                            ->
	// send (1) commitment_signed            -.
	//                                       <- update_add_htlc/commitment_signed
	// send (2) RAA (awaiting remote revoke) -.
	// (1) commitment_signed is delivered    ->
	//                                       .- send (3) RAA (awaiting remote revoke)
	// (2) RAA is delivered                  ->
	//                                       .- send (4) commitment_signed
	//                                       <- (3) RAA is delivered
	// send (5) commitment_signed            -.
	//                                       <- (4) commitment_signed is delivered
	// send (6) RAA                          -.
	// (5) commitment_signed is delivered    ->
	//                                       <- RAA
	// (6) RAA is delivered                  ->
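	//
	// Each numbered message above is delivered by the identically-numbered step below, so the
	// interleaving can be followed line by line.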

	// First nodes[0] generates an update_fee
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] { // (1)
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	// ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000);
	nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[1], 1);

	let payment_event = {
		let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events_1.len(), 1);
		SendEvent::from_event(events_1.remove(0))
	};
	assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
	assert_eq!(payment_event.msgs.len(), 1);

	// ...now when the messages get delivered everyone should be happy
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
	let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	// deliver (1), generate (3):
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[1], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack); // deliver (2)
	let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(bs_update.update_add_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fail_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fee.is_none()); // (4)
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); // deliver (3)
	let as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	assert!(as_update.update_add_htlcs.is_empty()); // (5)
	assert!(as_update.update_fulfill_htlcs.is_empty()); // (5)
	assert!(as_update.update_fail_htlcs.is_empty()); // (5)
	assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5)
	assert!(as_update.update_fee.is_none()); // (5)
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed); // deliver (4)
	let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// only (6) so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.commitment_signed); // deliver (5)
	let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
	check_added_monitors!(nodes[0], 1);

	let events_2 = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events_2.len(), 1);
	match events_2[0] {
		Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment
		_ => panic!("Unexpected event"),
	}

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke); // deliver (6)
	check_added_monitors!(nodes[1], 1);
}

#[test]
fn test_update_fee_unordered_raa() {
	// Just the intro to the previous test followed by an out-of-order RAA (which caused a
	// crash in an earlier version of the update_fee patch)
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &[&nodes[1]], 8000000);

	// First nodes[0] generates an update_fee
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let update_msg = match events_0[0] { // (1)
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
			update_fee.as_ref()
		},
		_ => panic!("Unexpected event"),
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	// ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000);
	nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[1], 1);

	let payment_event = {
		let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events_1.len(), 1);
		SendEvent::from_event(events_1.remove(0))
	};
	assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
	assert_eq!(payment_event.msgs.len(), 1);

	// ...now when the messages get delivered everyone should be happy
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
	let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg); // deliver (2)
	check_added_monitors!(nodes[1], 1);

	// We can't continue, sadly, because our (1) now has a bogus signature
}

#[test]
fn test_multi_flight_update_fee() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// A                                        B
	// update_fee/commitment_signed          ->
	//                                       .- send (1) RAA and (2) commitment_signed
	// update_fee (never committed)          ->
	// (3) update_fee                        ->
	// We have to manually generate the above update_fee; it is allowed by the protocol, but we
	// don't track which updates correspond to which revoke_and_ack responses, so we're in
	// AwaitingRAA mode and will not generate the update_fee yet.
	//                                       <- (1) RAA delivered
	// (3) is generated and (4) CS is sent   -.
	// Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't
	// know the per_commitment_point to use for it.
	//                                       <- (2) commitment_signed delivered
	// revoke_and_ack                        ->
	//                                          B should send no response here
	// (4) commitment_signed delivered       ->
	//                                       <- RAA/commitment_signed delivered
	// revoke_and_ack                        ->

	// First nodes[0] generates an update_fee
	let initial_feerate;
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		initial_feerate = *feerate_lock;
		*feerate_lock = initial_feerate + 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1)
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
			(update_fee.as_ref().unwrap(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};

	// Deliver first update_fee/commitment_signed pair, generating (1) and (2):
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1);
	let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	// nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment
	// transaction:
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock = initial_feerate + 40;
	}
	nodes[0].node.timer_tick_occurred();
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// Create the (3) update_fee message that nodes[0] will generate before it does...
	let mut update_msg_2 = msgs::UpdateFee {
		channel_id: update_msg_1.channel_id.clone(),
		feerate_per_kw: (initial_feerate + 30) as u32,
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);

	update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32;
	// Deliver (3)
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);

	// Deliver (1), generating (3) and (4)
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg);
	let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	check_added_monitors!(nodes[0], 1);
	assert!(as_second_update.update_add_htlcs.is_empty());
	assert!(as_second_update.update_fulfill_htlcs.is_empty());
	assert!(as_second_update.update_fail_htlcs.is_empty());
	assert!(as_second_update.update_fail_malformed_htlcs.is_empty());
	// Check that the update_fee newly generated matches what we delivered:
	assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id);
	assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw);

	// Deliver (2) commitment_signed
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
	let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	check_added_monitors!(nodes[0], 1);
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	// Deliver (4)
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
	let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment);
	let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);
}

fn do_test_sanity_on_in_flight_opens(steps: u8) {
	// Previously, we had issues deserializing channels when we hadn't connected the first block
	// after creation. To catch that and similar issues, we lean on the Node::drop impl to test
	// serialization round-trips and simply do steps towards opening a channel and then drop the
	// Node objects.
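	//
	// The low nibble of `steps` selects how far through the channel-open handshake to get before
	// dropping the nodes, and the high bit (0b1000_0000) additionally connects one dummy block on
	// both nodes first.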

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	if steps & 0b1000_0000 != 0 {
		let block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
		connect_block(&nodes[0], &block);
		connect_block(&nodes[1], &block);
	}

	if steps & 0x0f == 0 { return; }
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
	let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	if steps & 0x0f == 1 { return; }
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
	let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());

	if steps & 0x0f == 2 { return; }
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);

	let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);

	if steps & 0x0f == 3 { return; }
	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
	check_added_monitors!(nodes[0], 0);
	let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());

	if steps & 0x0f == 4 { return; }
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
	{
		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 1);
		assert_eq!(added_monitors[0].0, funding_output);
		added_monitors.clear();
	}
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());

	if steps & 0x0f == 5 { return; }
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
	{
		let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 1);
		assert_eq!(added_monitors[0].0, funding_output);
		added_monitors.clear();
	}

	expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
	let events_4 = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events_4.len(), 0);

	if steps & 0x0f == 6 { return; }
	create_chan_between_nodes_with_value_confirm_first(&nodes[0], &nodes[1], &tx, 2);

	if steps & 0x0f == 7 { return; }
	confirm_transaction_at(&nodes[0], &tx, 2);
	connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
	create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
	expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
}

#[test]
fn test_sanity_on_in_flight_opens() {
	// Exercise every stopping point, both with and without the extra initial block.
	for step in 0..=8 {
		do_test_sanity_on_in_flight_opens(step);
		do_test_sanity_on_in_flight_opens(step | 0b1000_0000);
	}
}

#[test]
fn test_update_fee_vanilla() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 25;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs: _, update_fulfill_htlcs: _, update_fail_htlcs: _, update_fail_malformed_htlcs: _, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
	let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);
}

#[test]
fn test_update_fee_that_funder_cannot_afford() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_value = 5000;
	let push_sats = 700;
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000);
	let channel_id = chan.2;
	let secp_ctx = Secp256k1::new();
	let default_config = UserConfig::default();
	let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);

	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();

	// Calculate the maximum feerate that A can afford. Note that we won't send an update_fee
	// unless we can afford CONCURRENT_INBOUND_HTLC_FEE_BUFFER additional HTLCs before actually
	// running out of local balance, so we calculate two different feerates here - the expected
	// local limit as well as the expected remote limit.
	let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(&channel_type_features) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32;
	let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(&channel_type_features)) as u32;
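	// Both limits solve fee = weight * feerate / 1000 for the feerate at which A's entire balance
	// above B's reserve goes to fees; they differ only in whether the weight budgets for
	// CONCURRENT_INBOUND_HTLC_FEE_BUFFER additional HTLC outputs.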
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock = feerate;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);
	let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg.update_fee.unwrap());

	commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false);

	// Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate set above.
	{
		let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone();

		// We made sure neither party's funds are below the dust limit and there are no HTLCs here
		assert_eq!(commitment_tx.output.len(), 2);
		let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000;
		let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value);
		actual_fee = channel_value - actual_fee;
		assert_eq!(total_fee, actual_fee);
	}

	{
		// Increment the feerate by a small constant, accounting for rounding errors
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 4;
	}
	nodes[0].node.timer_tick_occurred();
	nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
	check_added_monitors!(nodes[0], 0);

	const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;
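	// i.e. (1 << 48) - 2; commitment transaction numbers count down from 2^48 - 1.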

	// Get the TestChannelSigner for each channel, which will be used to (1) get the keys
	// needed to sign the new commitment tx and (2) sign the new commitment tx.
	let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let chan_signer = local_chan.get_signer();
		let pubkeys = chan_signer.as_ref().pubkeys();
		(pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
		 pubkeys.funding_pubkey)
	};
	let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
		let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let chan_signer = remote_chan.get_signer();
		let pubkeys = chan_signer.as_ref().pubkeys();
		(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
		 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
		 pubkeys.funding_pubkey)
	};

	// Assemble the set of keys we can use for signatures for our commitment_signed message.
	let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
		&remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);
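	// We use nodes[1]'s per-commitment point since we're hand-building the commitment transaction
	// that nodes[1] (the counterparty) would broadcast; hence the as_counterparty_broadcastable()
	// parameters below.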

	let res = {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let local_chan_signer = local_chan.get_signer();
		let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![];
		let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
			INITIAL_COMMITMENT_NUMBER - 1,
			push_sats,
			channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000,
			local_funding, remote_funding,
			commit_tx_keys.clone(),
			non_buffer_feerate + 4,
			&mut htlcs,
			&local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
		);
		local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
	};

	let commit_signed_msg = msgs::CommitmentSigned {
		channel_id: chan.2,
		signature: res.0,
		htlc_signatures: res.1,
		#[cfg(taproot)]
		partial_signature_with_nonce: None,
	};

	let update_fee = msgs::UpdateFee {
		channel_id: chan.2,
		feerate_per_kw: non_buffer_feerate + 4,
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_fee);

	// While producing the commitment_signed response after handling a received update_fee request,
	// the check that the funder (who sent the update_fee request) can afford the new fee
	// (funder_balance >= fee + channel_reserve) should produce an error.
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
	check_added_monitors!(nodes[1], 1);
	check_closed_broadcast!(nodes[1], true);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") },
		[nodes[0].node.get_our_node_id()], channel_value);
}

#[test]
fn test_update_fee_with_fundee_update_add_htlc() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &[&nodes[1]], 8000000);

	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs: _, update_fulfill_htlcs: _, update_fail_htlcs: _, update_fail_malformed_htlcs: _, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 800000);

	// nothing happens since nodes[1] is in AwaitingRemoteRevoke
	nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	{
		let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 0);
		added_monitors.clear();
	}
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	// nodes[1] has nothing to do

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
	let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
	check_added_monitors!(nodes[1], 1);
	// AwaitingRemoteRevoke ends here

	let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert_eq!(commitment_update.update_add_htlcs.len(), 1);
	assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0);
	assert_eq!(commitment_update.update_fail_htlcs.len(), 0);
	assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0);
	assert!(commitment_update.update_fee.is_none());

	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke);
	check_added_monitors!(nodes[0], 1);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	expect_pending_htlcs_forwardable!(nodes[0]);

	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentClaimable { .. } => { },
		_ => panic!("Unexpected event"),
	};

	claim_payment(&nodes[1], &[&nodes[0]], our_payment_preimage);

	send_payment(&nodes[1], &[&nodes[0]], 800000);
	send_payment(&nodes[0], &[&nodes[1]], 800000);
	close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}

#[test]
fn test_update_fee() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
	let channel_id = chan.2;

	// A                                        B
	// (1) update_fee/commitment_signed      ->
	//                                       <- (2) revoke_and_ack
	//                                       .- send (3) commitment_signed
	// (4) update_fee/commitment_signed      ->
	//                                       .- send (5) revoke_and_ack (no CS as we're awaiting a revoke)
	//                                       <- (3) commitment_signed delivered
	// send (6) revoke_and_ack               -.
	//                                       <- (5) deliver revoke_and_ack
	// (6) deliver revoke_and_ack            ->
	//                                       .- send (7) commitment_signed in response to (4)
	//                                       <- (7) deliver commitment_signed
	// revoke_and_ack                        ->

	// Create and deliver (1)...
	let feerate;
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		feerate = *feerate_lock;
		*feerate_lock = feerate + 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs: _, update_fulfill_htlcs: _, update_fail_htlcs: _, update_fail_malformed_htlcs: _, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	// Generate (2) and (3):
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	// Deliver (2):
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	// Create and deliver (4)...
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock = feerate + 30;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);
	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs: _, update_fulfill_htlcs: _, update_fail_htlcs: _, update_fail_malformed_htlcs: _, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	check_added_monitors!(nodes[1], 1);
	// ... creating (5)
	let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	// Handle (3), creating (6):
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0);
	check_added_monitors!(nodes[0], 1);
	let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	// Deliver (5):
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	// Deliver (6), creating (7):
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0);
	let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(commitment_update.update_add_htlcs.is_empty());
	assert!(commitment_update.update_fulfill_htlcs.is_empty());
	assert!(commitment_update.update_fail_htlcs.is_empty());
	assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
	assert!(commitment_update.update_fee.is_none());
	check_added_monitors!(nodes[1], 1);

	// Deliver (7)
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30);
	assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30);
	close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}
991
992 #[test]
993 fn fake_network_test() {
994         // Simple test which builds a network of ChannelManagers, connects them to each other, and
995         // tests that payments get routed and transactions broadcast in semi-reasonable ways.
996         let chanmon_cfgs = create_chanmon_cfgs(4);
997         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
998         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
999         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
1000
1001         // Create some initial channels
1002         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1003         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
1004         let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
1005
1006         // Rebalance the network a bit by relaying one payment through all the channels...
1007         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1008         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1009         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1010         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1011
1012         // Send some more payments
1013         send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000);
1014         send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000);
1015         send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000);
1016
1017         // Test failure packets
1018         let payment_hash_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 1000000).1;
1019         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1);
1020
1021         // Add a new channel that skips 3
1022         let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3);
1023
1024         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
1025         send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
1026         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1027         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1028         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1029         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1030         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1031
1032         // Do some rebalance loop payments, simultaneously
1033         let mut hops = Vec::with_capacity(3);
1034         hops.push(RouteHop {
1035                 pubkey: nodes[2].node.get_our_node_id(),
1036                 node_features: NodeFeatures::empty(),
1037                 short_channel_id: chan_2.0.contents.short_channel_id,
1038                 channel_features: ChannelFeatures::empty(),
1039                 fee_msat: 0,
1040                 cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32,
1041                 maybe_announced_channel: true,
1042         });
1043         hops.push(RouteHop {
1044                 pubkey: nodes[3].node.get_our_node_id(),
1045                 node_features: NodeFeatures::empty(),
1046                 short_channel_id: chan_3.0.contents.short_channel_id,
1047                 channel_features: ChannelFeatures::empty(),
1048                 fee_msat: 0,
1049                 cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32,
1050                 maybe_announced_channel: true,
1051         });
1052         hops.push(RouteHop {
1053                 pubkey: nodes[1].node.get_our_node_id(),
1054                 node_features: nodes[1].node.node_features(),
1055                 short_channel_id: chan_4.0.contents.short_channel_id,
1056                 channel_features: nodes[1].node.channel_features(),
1057                 fee_msat: 1000000,
1058                 cltv_expiry_delta: TEST_FINAL_CLTV,
1059                 maybe_announced_channel: true,
1060         });
1061         hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
1062         hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
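	// Per BOLT 7, each intermediate hop charges fee_base_msat plus
	// fee_proportional_millionths per million msat forwarded, which is exactly what
	// the two assignments above compute. As a hedged worked example: forwarding
	// 1_000_000 msat over a hop charging 1 sat base + 100 ppm costs
	// 1_000 + 1_000_000 * 100 / 1_000_000 = 1_100 msat.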
	let payment_preimage_1 = send_along_route(&nodes[1],
		Route { paths: vec![Path { hops, blinded_tail: None }], route_params: None },
		&vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;

	let mut hops = Vec::with_capacity(3);
	hops.push(RouteHop {
		pubkey: nodes[3].node.get_our_node_id(),
		node_features: NodeFeatures::empty(),
		short_channel_id: chan_4.0.contents.short_channel_id,
		channel_features: ChannelFeatures::empty(),
		fee_msat: 0,
		cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32,
		maybe_announced_channel: true,
	});
	hops.push(RouteHop {
		pubkey: nodes[2].node.get_our_node_id(),
		node_features: NodeFeatures::empty(),
		short_channel_id: chan_3.0.contents.short_channel_id,
		channel_features: ChannelFeatures::empty(),
		fee_msat: 0,
		cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32,
		maybe_announced_channel: true,
	});
	hops.push(RouteHop {
		pubkey: nodes[1].node.get_our_node_id(),
		node_features: nodes[1].node.node_features(),
		short_channel_id: chan_2.0.contents.short_channel_id,
		channel_features: nodes[1].node.channel_features(),
		fee_msat: 1000000,
		cltv_expiry_delta: TEST_FINAL_CLTV,
		maybe_announced_channel: true,
	});
	hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
	hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
	let payment_hash_2 = send_along_route(&nodes[1],
		Route { paths: vec![Path { hops, blinded_tail: None }], route_params: None },
		&vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;

	// Claim (or fail backwards) the two rebalance payments...
	fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
	claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);

	// Close down the channels...
	close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
	close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
	check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
	close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}

#[test]
fn holding_cell_htlc_counting() {
	// Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs
	// to ensure we don't end up with HTLCs sitting around in our holding cell for several
	// commitment dance rounds.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);

	// Fetch a route in advance as we will be unable to do so once we're unable to send.
	let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);

	let mut payments = Vec::new();
	for _ in 0..50 {
		let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
		nodes[1].node.send_payment_with_route(&route, payment_hash,
			RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
		payments.push((payment_preimage, payment_hash));
	}
	check_added_monitors!(nodes[1], 1);

	let mut events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let initial_payment_event = SendEvent::from_event(events.pop().unwrap());
	assert_eq!(initial_payment_event.node_id, nodes[2].node.get_our_node_id());

	// There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs in
	// the holding cell waiting on B's RAA to send. At this point we should not be able to add
	// another HTLC.
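	// Concretely (a sketch, assuming OUR_MAX_HTLCS is still 50, as it was when this
	// test was written): the loop above queued 50 HTLCs, the first went out in an
	// immediate update_add_htlc/commitment_signed (hence the single monitor update)
	// and the other 49 sit in the holding cell, so a 51st send must fail the
	// pending-HTLC-count check.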
	{
		unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, payment_hash_1,
				RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
			), true, APIError::ChannelUnavailable { .. }, {});
		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	}

	// This should also be true if we try to forward a payment.
	let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
	{
		nodes[0].node.send_payment_with_route(&route, payment_hash_2,
			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
	// We have to forward pending HTLCs twice - the first pass tries to forward the payment
	// onward (and fails), the second processes the resulting failure and fails the HTLC
	// backward.
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
	check_added_monitors!(nodes[1], 1);

	let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true);

	expect_payment_failed_with_update!(nodes[0], payment_hash_2, false, chan_2.0.contents.short_channel_id, false);

	// Now forward all the pending HTLCs and claim them back
	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &initial_payment_event.msgs[0]);
	nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &initial_payment_event.commitment_msg);
	check_added_monitors!(nodes[2], 1);

	let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
	check_added_monitors!(nodes[1], 1);
	let as_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());

	nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());

	for update in as_updates.update_add_htlcs.iter() {
		nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update);
	}
	nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_updates.commitment_signed);
	check_added_monitors!(nodes[2], 1);
	nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[2], 1);
	let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());

	nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
	check_added_monitors!(nodes[1], 1);
	nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());

	nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_final_raa);
	check_added_monitors!(nodes[2], 1);

	expect_pending_htlcs_forwardable!(nodes[2]);

	let events = nodes[2].node.get_and_clear_pending_events();
	assert_eq!(events.len(), payments.len());
	for (event, &(_, ref hash)) in events.iter().zip(payments.iter()) {
		match event {
			&Event::PaymentClaimable { ref payment_hash, .. } => {
				assert_eq!(*payment_hash, *hash);
			},
			_ => panic!("Unexpected event"),
		};
	}

	for (preimage, _) in payments.drain(..) {
		claim_payment(&nodes[1], &[&nodes[2]], preimage);
	}

	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
}

#[test]
fn duplicate_htlc_test() {
	// Test that we accept duplicate payment_hash HTLCs across the network and that
	// claiming/failing them is handled separately and doesn't affect the others
	let chanmon_cfgs = create_chanmon_cfgs(6);
	let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
	let mut nodes = create_network(6, &node_cfgs, &node_chanmgrs);

	// Create some initial channels so that 0, 1 and 2 can each route to 4/5 via 3
	create_announced_chan_between_nodes(&nodes, 0, 3);
	create_announced_chan_between_nodes(&nodes, 1, 3);
	create_announced_chan_between_nodes(&nodes, 2, 3);
	create_announced_chan_between_nodes(&nodes, 3, 4);
	create_announced_chan_between_nodes(&nodes, 3, 5);

	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);

	// Roll back the shared payment counter so the next route_payment re-uses the
	// same payment preimage and hash as the first payment.
	*nodes[0].network_payment_count.borrow_mut() -= 1;
	assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage);

	*nodes[0].network_payment_count.borrow_mut() -= 1;
	assert_eq!(route_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], 1000000).0, payment_preimage);

	claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage);
	fail_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], payment_hash);
	claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage);
}

#[test]
fn test_duplicate_htlc_different_direction_onchain() {
	// Test that ChannelMonitor doesn't generate 2 preimage txn
	// when we have 2 HTLCs with same preimage that go across a node
	// in opposite directions, even with the same payment secret.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);

	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);

	let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], 800_000);
	let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
	send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], 800_000, payment_hash, node_a_payment_secret);

	// Provide preimage to node 0 by claiming payment
	nodes[0].node.claim_funds(payment_preimage);
	expect_payment_claimed!(nodes[0], payment_hash, 800_000);
	check_added_monitors!(nodes[0], 1);

	// Fetch node 1's commitment txn, which we'll confirm on-chain below
	let remote_txn = get_local_commitment_txn!(nodes[1], chan_1.2);

	assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound
	let mut has_both_htlcs = 0; // check htlcs match ones committed
	for outp in remote_txn[0].output.iter() {
		if outp.value == 800_000 / 1000 {
			has_both_htlcs += 1;
		} else if outp.value == 900_000 / 1000 {
			has_both_htlcs += 1;
		}
	}
	assert_eq!(has_both_htlcs, 2);

	mine_transaction(&nodes[0], &remote_txn[0]);
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
	connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires

	let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
	assert_eq!(claim_txn.len(), 3);

	check_spends!(claim_txn[0], remote_txn[0]); // Immediate HTLC claim with preimage
	check_spends!(claim_txn[1], remote_txn[0]);
	check_spends!(claim_txn[2], remote_txn[0]);
	let preimage_tx = &claim_txn[0];
	let (preimage_bump_tx, timeout_tx) = if claim_txn[1].input[0].previous_output == preimage_tx.input[0].previous_output {
		(&claim_txn[1], &claim_txn[2])
	} else {
		(&claim_txn[2], &claim_txn[1])
	};

	assert_eq!(preimage_tx.input.len(), 1);
	assert_eq!(preimage_bump_tx.input.len(), 1);

	assert_eq!(preimage_tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx
	assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value, 800);

	assert_eq!(timeout_tx.input.len(), 1);
	assert_eq!(timeout_tx.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx
	check_spends!(timeout_tx, remote_txn[0]);
	assert_eq!(remote_txn[0].output[timeout_tx.input[0].previous_output.vout as usize].value, 900);
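	// A hedged reading of the two witness-length checks above: BOLT 3 offered and
	// accepted HTLC scripts have different lengths, so the length of the final
	// witness element identifies which output each claim spends on the broadcast
	// (nodes[1]) commitment tx: the preimage claim spends the HTLC nodes[1] offered,
	// while the timeout claim spends the HTLC nodes[1] accepted.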

	let events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 3);
	for e in events {
		match e {
			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
			MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::DisconnectPeer { ref msg } } => {
				assert_eq!(node_id, nodes[1].node.get_our_node_id());
				assert_eq!(msg.as_ref().unwrap().data, "Channel closed because commitment or closing transaction was confirmed on chain.");
			},
			MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
				assert!(update_add_htlcs.is_empty());
				assert!(update_fail_htlcs.is_empty());
				assert_eq!(update_fulfill_htlcs.len(), 1);
				assert!(update_fail_malformed_htlcs.is_empty());
				assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
			},
			_ => panic!("Unexpected event"),
		}
	}
}

#[test]
fn test_basic_channel_reserve() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);

	let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
	let channel_reserve = chan_stat.channel_reserve_msat;

	// The 2* and +1 are for the fee spike reserve.
	let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, &get_channel_type_features!(nodes[0], nodes[1], chan.2));
	let max_can_send = 5000000 - channel_reserve - commit_tx_fee;
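	// A note on the buffer above: LDK requires the funder to keep enough balance to
	// pay the commitment fee at FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE (2x at the
	// time of writing) the current feerate, counting one extra to-be-added HTLC,
	// hence the "2 *" and the "1 + 1" HTLC count in the line above.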
	let (mut route, our_payment_hash, _, our_payment_secret) =
		get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
	route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
	let err = nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).err().unwrap();
	match err {
		PaymentSendFailure::AllFailedResendSafe(ref fails) => {
			if let &APIError::ChannelUnavailable { .. } = &fails[0] {}
			else { panic!("Unexpected error variant"); }
		},
		_ => panic!("Unexpected error variant"),
	}
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	send_payment(&nodes[0], &vec![&nodes[1]], max_can_send);
}

#[test]
fn test_fee_spike_violation_fails_htlc() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);

	let (mut route, payment_hash, _, payment_secret) =
		get_route_and_payment_hash!(nodes[0], nodes[1], 3460000);
	route.paths[0].hops[0].fee_msat += 1;
	// Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
	let secp_ctx = Secp256k1::new();
	let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!");

	let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;

	let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
	let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
		3460001, RecipientOnionFields::secret_only(payment_secret), cur_height, &None).unwrap();
	let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
	let msg = msgs::UpdateAddHTLC {
		channel_id: chan.2,
		htlc_id: 0,
		amount_msat: htlc_msat,
		payment_hash: payment_hash,
		cltv_expiry: htlc_cltv,
		onion_routing_packet: onion_packet,
		skimmed_fee_msat: None,
	};

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);

	// Now manually create the commitment_signed message corresponding to the update_add
	// nodes[0] just sent. In the code for construction of this message, "local" refers
	// to the sender of the message, and "remote" refers to the receiver.

	let feerate_per_kw = get_feerate!(nodes[0], nodes[1], chan.2);

	const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
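	// Per BOLT 3, commitment numbers start at 2^48 - 1 and count *down* with each new
	// commitment, which is why the code below asks the signer for per-commitment
	// points at INITIAL_COMMITMENT_NUMBER - 1 and INITIAL_COMMITMENT_NUMBER - 2.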

	// Get the TestChannelSigner for each channel, which will be used to (1) get the keys
	// needed to sign the new commitment tx and (2) sign the new commitment tx.
	let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let chan_signer = local_chan.get_signer();
		// Make the signer believe we validated another commitment, so we can release the secret
		chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;

		let pubkeys = chan_signer.as_ref().pubkeys();
		(pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
		 chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER),
		 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx),
		 chan_signer.as_ref().pubkeys().funding_pubkey)
	};
	let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
		let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let chan_signer = remote_chan.get_signer();
		let pubkeys = chan_signer.as_ref().pubkeys();
		(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
		 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
		 chan_signer.as_ref().pubkeys().funding_pubkey)
	};

	// Assemble the set of keys we can use for signatures for our commitment_signed message.
	let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
		&remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);

	// Build the remote commitment transaction so we can sign it, and then later use the
	// signature for the commitment_signed message.
	let local_chan_balance = 1313;

	let accepted_htlc_info = chan_utils::HTLCOutputInCommitment {
		offered: false,
		amount_msat: 3460001,
		cltv_expiry: htlc_cltv,
		payment_hash,
		transaction_output_index: Some(1),
	};

	let commitment_number = INITIAL_COMMITMENT_NUMBER - 1;

	let res = {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let local_chan_signer = local_chan.get_signer();
		let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
			commitment_number,
			95000,
			local_chan_balance,
			local_funding, remote_funding,
			commit_tx_keys.clone(),
			feerate_per_kw,
			&mut vec![(accepted_htlc_info, ())],
			&local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
		);
		local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
	};

	let commit_signed_msg = msgs::CommitmentSigned {
		channel_id: chan.2,
		signature: res.0,
		htlc_signatures: res.1,
		#[cfg(taproot)]
		partial_signature_with_nonce: None,
	};

	// Send the commitment_signed message to nodes[1].
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
	let _ = nodes[1].node.get_and_clear_pending_msg_events();

	// Send the RAA to nodes[1].
	let raa_msg = msgs::RevokeAndACK {
		channel_id: chan.2,
		per_commitment_secret: local_secret,
		next_per_commitment_point: next_local_point,
		#[cfg(taproot)]
		next_local_nonce: None,
	};
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_msg);

	let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	// Make sure the HTLC failed in the way we expect.
	match events[0] {
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, .. } => {
			assert_eq!(update_fail_htlcs.len(), 1);
			update_fail_htlcs[0].clone()
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].logger.assert_log("lightning::ln::channel".to_string(),
		format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1);

	check_added_monitors!(nodes[1], 2);
}

#[test]
fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
	let mut chanmon_cfgs = create_chanmon_cfgs(2);
	// Set things up so that the fundee (nodes[1]) sending any above-dust amount
	// beyond an initial MIN_AFFORDABLE_HTLC_COUNT-HTLC buffer would put the funder
	// (nodes[0]) under its channel reserve. In this test we check that we would be
	// prevented from sending such an HTLC.
	let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let default_config = UserConfig::default();
	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();

	let mut push_amt = 100_000_000;
	push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);

	push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
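	// (A hedged note: push_amt now leaves nodes[0], the funder who pays the commitment
	// tx fee, with exactly its channel reserve plus the fee for a
	// MIN_AFFORDABLE_HTLC_COUNT-HTLC commitment, so each above-dust HTLC that
	// nodes[1] adds below eats one HTLC's worth of that fee buffer.)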

	let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);

	// Fetch a route in advance as we will be unable to do so once we're unable to send.
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000);
	// Sending exactly enough to hit the reserve amount should be accepted
	for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
		route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
	}

	// However one more HTLC should be significantly over the reserve amount and fail.
	unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
			RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
		), true, APIError::ChannelUnavailable { .. }, {});
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
}

#[test]
fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
	let mut chanmon_cfgs = create_chanmon_cfgs(2);
	let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let default_config = UserConfig::default();
	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();

	// Set nodes[0]'s balance such that it will consider any above-dust received HTLC
	// to be a channel reserve violation: its balance is its channel reserve plus the
	// commitment transaction fee for a MIN_AFFORDABLE_HTLC_COUNT-HTLC commitment.
	let mut push_amt = 100_000_000;
	push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
	push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);

	// Send four HTLCs to cover the initial push_msat buffer we're required to include
	for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
		route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
	}

	let (mut route, payment_hash, _, payment_secret) =
		get_route_and_payment_hash!(nodes[1], nodes[0], 1000);
	route.paths[0].hops[0].fee_msat = 700_000;
	// Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
	let secp_ctx = Secp256k1::new();
	let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
	let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
	let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
	let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
		700_000, RecipientOnionFields::secret_only(payment_secret), cur_height, &None).unwrap();
	let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
	let msg = msgs::UpdateAddHTLC {
		channel_id: chan.2,
		htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64,
		amount_msat: htlc_msat,
		payment_hash: payment_hash,
		cltv_expiry: htlc_cltv,
		onion_routing_packet: onion_packet,
		skimmed_fee_msat: None,
	};

	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
	// Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
	nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
	assert_eq!(nodes[0].node.list_channels().len(), 0);
	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
	assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() },
		[nodes[1].node.get_our_node_id()], 100000);
}

#[test]
fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
	// Test that if we receive many dust HTLCs over an outbound channel, they don't count when
	// calculating our commitment transaction fee (this was previously broken).
	let mut chanmon_cfgs = create_chanmon_cfgs(2);
	let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();

	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let default_config = UserConfig::default();
	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();

	// Set nodes[0]'s balance such that it will consider any above-dust received HTLC
	// to be a channel reserve violation: its balance is its channel reserve plus the
	// commitment transaction fee for a MIN_AFFORDABLE_HTLC_COUNT-HTLC commitment.
	let mut push_amt = 100_000_000;
	push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
	push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt);

	let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000
		+ feerate_per_kw as u64 * htlc_success_tx_weight(&channel_type_features) / 1000 * 1000 - 1;
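	// A hedged aside: this computes the largest *received* HTLC that is still dust on
	// nodes[0]'s commitment transaction, i.e. one msat below the dust limit plus the
	// fee of the HTLC-success transaction that would otherwise claim it (the BOLT 3
	// trimmed-output rule for received HTLCs).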
	// In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
	// reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
	// commitment transaction fee.
	route_payment(&nodes[1], &[&nodes[0]], dust_amt);

	// Send four HTLCs to cover the initial push_msat buffer we're required to include
	for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
		route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
	}

	// One more than the dust amt should fail, however.
	let (mut route, our_payment_hash, _, our_payment_secret) =
		get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt);
	route.paths[0].hops[0].fee_msat += 1;
	unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
			RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
		), true, APIError::ChannelUnavailable { .. }, {});
}

#[test]
fn test_chan_init_feerate_unaffordability() {
	// Test that we will reject channel opens which do not leave enough to pay for any HTLCs due to
	// channel reserve and feerate requirements.
	let mut chanmon_cfgs = create_chanmon_cfgs(2);
	let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let default_config = UserConfig::default();
	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();

	// Set the push_msat amount such that nodes[0] will not be able to afford to add even a single
	// HTLC.
	let mut push_amt = 100_000_000;
	push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
	assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None, None).unwrap_err(),
		APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() });

	// During open, we don't have a "counterparty channel reserve" to check against, so that
	// requirement only comes into play on the open_channel handling side.
	push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None, None).unwrap();
	let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	open_channel_msg.push_msat += 1;
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);

	let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 1);
	match msg_events[0] {
		MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
			assert_eq!(msg.data, "Insufficient funding amount for initial reserve");
		},
		_ => panic!("Unexpected event"),
	}
}

#[test]
fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
	// Test that if we receive many dust HTLCs over an inbound channel, they don't count when
	// calculating our counterparty's commitment transaction fee (this was previously broken).
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000);

	let payment_amt = 46000; // Dust amount
	// In the previous code, these first four payments would succeed.
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);

	// Then these next 5 would be interpreted by nodes[1] as violating the fee spike buffer.
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);

	// And this last payment previously resulted in nodes[1] closing on its inbound-channel
	// counterparty, because it counted all the previous dust HTLCs against nodes[0]'s commitment
	// transaction fee and therefore perceived this next payment as a channel reserve violation.
	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
}

#[test]
fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
	let _ = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);

	let feemsat = 239;
	let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
	let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
	let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
	let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);

	// The 2* and +1 are for the fee spike reserve.
	let commit_tx_fee_2_htlc = 2 * commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
	let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc) / 2;
	let amt_msat_1 = recv_value_1 + total_routing_fee_msat;

	// Add a pending HTLC.
	let (route_1, our_payment_hash_1, _, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_1);
	let payment_event_1 = {
		nodes[0].node.send_payment_with_route(&route_1, our_payment_hash_1,
			RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
		check_added_monitors!(nodes[0], 1);

		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		SendEvent::from_event(events.remove(0))
	};
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);

	// Attempt to trigger a channel reserve violation --> payment failure.
	let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, &channel_type_features);
	let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1;
	let amt_msat_2 = recv_value_2 + total_routing_fee_msat;
	let mut route_2 = route_1.clone();
	route_2.paths[0].hops.last_mut().unwrap().fee_msat = amt_msat_2;

	// Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
	let secp_ctx = Secp256k1::new();
	let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
	let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
	let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap();
	let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
		&route_2.paths[0], recv_value_2, RecipientOnionFields::spontaneous_empty(), cur_height, &None).unwrap();
	let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1).unwrap();
	let msg = msgs::UpdateAddHTLC {
		channel_id: chan.2,
		htlc_id: 1,
		amount_msat: htlc_msat + 1,
		payment_hash: our_payment_hash_1,
		cltv_expiry: htlc_cltv,
		onion_routing_packet: onion_packet,
		skimmed_fee_msat: None,
	};

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
	// Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote HTLC add would put them under remote reserve value".to_string(), 1);
	assert_eq!(nodes[1].node.list_channels().len(), 1);
	let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
	assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() },
		[nodes[0].node.get_our_node_id()], 100000);
}

#[test]
fn test_inbound_outbound_capacity_is_not_zero() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
	let channels0 = node_chanmgrs[0].list_channels();
	let channels1 = node_chanmgrs[1].list_channels();
	let default_config = UserConfig::default();
	assert_eq!(channels0.len(), 1);
	assert_eq!(channels1.len(), 1);

	let reserve = get_holder_selected_channel_reserve_satoshis(100_000, &default_config);
	assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve * 1000);
	assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve * 1000);

	assert_eq!(channels0[0].outbound_capacity_msat, 100000 * 1000 - 95000000 - reserve * 1000);
	assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve * 1000);
}

// Computes the commitment transaction fee in msat, rounded down to a whole
// satoshi, for the given feerate (sat per 1000 weight) and non-dust HTLC count.
fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, channel_type_features: &ChannelTypeFeatures) -> u64 {
	(commitment_tx_base_weight(channel_type_features) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
}
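
// A minimal sanity-check sketch of the helper above (not part of the original
// suite): the fee is (base_weight + num_htlcs * weight_per_htlc) * feerate,
// rounded down to a whole satoshi. As a hedged worked example, assuming the test
// harness's default feerate of 253 sat/kW and non-anchor commitments (724 weight
// base, 172 weight per HTLC at the time of writing), one HTLC costs roughly
// (724 + 172) * 253 / 1000 sat = 226 sat, i.e. 226_000 msat.
#[test]
fn commit_tx_fee_msat_rounds_down_to_whole_sats() {
	let features = ChannelTypeFeatures::only_static_remote_key();
	let fee = commit_tx_fee_msat(253, 1, &features);
	// The result is always a whole number of satoshis, expressed in msat.
	assert_eq!(fee % 1000, 0);
	// And it matches the weight-based formula the helper implements.
	assert_eq!(fee, (commitment_tx_base_weight(&features) + COMMITMENT_TX_WEIGHT_PER_HTLC) * 253 / 1000 * 1000);
}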
1829
1830 #[test]
1831 fn test_channel_reserve_holding_cell_htlcs() {
1832         let chanmon_cfgs = create_chanmon_cfgs(3);
1833         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1834         // When this test was written, the default base fee floated based on the HTLC count.
1835         // It is now fixed, so we simply set the fee to the expected value here.
1836         let mut config = test_default_channel_config();
1837         config.channel_config.forwarding_fee_base_msat = 239;
1838         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
1839         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1840         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001);
1841         let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 190000, 1001);
1842
1843         let mut stat01 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
1844         let mut stat11 = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
1845
1846         let mut stat12 = get_channel_value_stat!(nodes[1], nodes[2], chan_2.2);
1847         let mut stat22 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2);
1848
1849         macro_rules! expect_forward {
1850                 ($node: expr) => {{
1851                         let mut events = $node.node.get_and_clear_pending_msg_events();
1852                         assert_eq!(events.len(), 1);
1853                         check_added_monitors!($node, 1);
1854                         let payment_event = SendEvent::from_event(events.remove(0));
1855                         payment_event
1856                 }}
1857         }
1858
1859         let feemsat = 239; // set above
1860         let total_fee_msat = (nodes.len() - 2) as u64 * feemsat;
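             // Only the one intermediate hop (nodes[1]) charges a fee here: the 239 msat base fee
             // configured above.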
1861         let feerate = get_feerate!(nodes[0], nodes[1], chan_1.2);
1862         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_1.2);
1863
1864         let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat;
1865
1866         // attempt to send amt_msat > their_max_htlc_value_in_flight_msat
1867         {
1868                 let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
1869                         .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
1870                 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0);
1871                 route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
1872                 assert!(route.paths[0].hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
1873
1874                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1875                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1876                         ), true, APIError::ChannelUnavailable { .. }, {});
1877                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1878         }
1879
1880         // The channel reserve is bigger than their_max_htlc_value_in_flight_msat, so loop to
1881         // deplete nodes[0]'s balance
1882         loop {
1883                 let amt_msat = recv_value_0 + total_fee_msat;
1884                 // 2 * commit_tx_fee(3 + 1): the 3 covers the 3 HTLCs that will be sent, while
1885                 // the 2* and the +1 make up the fee spike buffer. Also, ensure each payment is
1886                 // over the dust limit so that it is included in each commit tx fee calculation.
1887                 let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
1888                 let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000);
1889                 if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat {
1890                         break;
1891                 }
1892
1893                 let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
1894                         .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
1895                 let route = get_route!(nodes[0], payment_params, recv_value_0).unwrap();
1896                 let (payment_preimage, ..) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0);
1897                 claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
1898
1899                 let (stat01_, stat11_, stat12_, stat22_) = (
1900                         get_channel_value_stat!(nodes[0], nodes[1], chan_1.2),
1901                         get_channel_value_stat!(nodes[1], nodes[0], chan_1.2),
1902                         get_channel_value_stat!(nodes[1], nodes[2], chan_2.2),
1903                         get_channel_value_stat!(nodes[2], nodes[1], chan_2.2),
1904                 );
1905
1906                 assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
1907                 assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
1908                 assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
1909                 assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
1910                 stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
1911         }
1912
1913         // Add a pending output.
1914         // The 2* and the +1 HTLC on the commit tx fee account for the fee spike buffer.
1915         // The reason we divide by two here is as follows: the dividend is the total outbound
1916         // liquidity after fees, the channel reserve, and the fee spike buffer are removed. We
1917         // eventually want to split this quantity into 3 portions, each of which will be sent in
1918         // an HTLC. This allows us to test channel reserve policy at the edges of what amount is
1919         // sendable, i.e. cases where sending 1 msat over X will cause a payment failure, while
1920         // anything up to X can be sent successfully. Dividing by two is thus a somewhat
1921         // arbitrary way of getting the amount of the first of these 3 payments. The reason we
1922         // split into 3 payments at all is to test the behavior of the holding cell with respect
1923         // to channel reserve and commit tx fee policy.
1924         let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
1925         let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2;
1926         let amt_msat_1 = recv_value_1 + total_fee_msat;
1927
1928         let (route_1, our_payment_hash_1, our_payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_1);
1929         let payment_event_1 = {
1930                 nodes[0].node.send_payment_with_route(&route_1, our_payment_hash_1,
1931                         RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
1932                 check_added_monitors!(nodes[0], 1);
1933
1934                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1935                 assert_eq!(events.len(), 1);
1936                 SendEvent::from_event(events.remove(0))
1937         };
1938         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
1939
1940         // Channel reserve test with a pending HTLC output > 0
1941         let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs;
1942         {
1943                 let mut route = route_1.clone();
1944                 route.paths[0].hops.last_mut().unwrap().fee_msat = recv_value_2 + 1;
1945                 let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
1946                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1947                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1948                         ), true, APIError::ChannelUnavailable { .. }, {});
1949                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1950         }
1951
1952         // Split the rest across two more HTLCs to test the holding cell
1953         let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
1954         let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs;
1955         let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2;
1956         let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat;
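             // Sanity check: once both holding-cell HTLCs and their fees are accounted for,
             // nodes[0] will sit exactly at its channel reserve.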
1957         {
1958                 let stat = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
1959                 assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat + commit_tx_fee_3_htlcs), stat.channel_reserve_msat);
1960         }
1961
1962         // now see if they go through on both sides
1963         let (route_21, our_payment_hash_21, our_payment_preimage_21, our_payment_secret_21) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_21);
1964         // ...but this one will get stuck in the holding cell, as nodes[0] is still awaiting an RAA
1965         nodes[0].node.send_payment_with_route(&route_21, our_payment_hash_21,
1966                 RecipientOnionFields::secret_only(our_payment_secret_21), PaymentId(our_payment_hash_21.0)).unwrap();
1967         check_added_monitors!(nodes[0], 0);
1968         let events = nodes[0].node.get_and_clear_pending_events();
1969         assert_eq!(events.len(), 0);
1970
1971         // test with outbound holding cell amount > 0
1972         {
1973                 let (mut route, our_payment_hash, _, our_payment_secret) =
1974                         get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
1975                 route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
1976                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1977                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1978                         ), true, APIError::ChannelUnavailable { .. }, {});
1979                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1980         }
1981
1982         let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
1983         // this one will also get stuck in the holding cell
1984         nodes[0].node.send_payment_with_route(&route_22, our_payment_hash_22,
1985                 RecipientOnionFields::secret_only(our_payment_secret_22), PaymentId(our_payment_hash_22.0)).unwrap();
1986         check_added_monitors!(nodes[0], 0);
1987         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
1988         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1989
1990         // flush the pending htlc
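             // Delivering the commitment_signed for the first HTLC kicks off the full dance:
             // nodes[1] responds with RAA + CS, and once nodes[0] handles that RAA it frees the
             // two holding-cell HTLCs into commitment_update_2 below.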
1991         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg);
1992         let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1993         check_added_monitors!(nodes[1], 1);
1994
1995         // the pending htlc should be promoted to committed
1996         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
1997         check_added_monitors!(nodes[0], 1);
1998         let commitment_update_2 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1999
2000         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed);
2001         let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2002         // No commitment_signed so get_event_msg's assert(len == 1) passes
2003         check_added_monitors!(nodes[0], 1);
2004
2005         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack);
2006         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2007         check_added_monitors!(nodes[1], 1);
2008
2009         expect_pending_htlcs_forwardable!(nodes[1]);
2010
2011         let ref payment_event_11 = expect_forward!(nodes[1]);
2012         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]);
2013         commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
2014
2015         expect_pending_htlcs_forwardable!(nodes[2]);
2016         expect_payment_claimable!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1);
2017
2018         // flush the htlcs in the holding cell
2019         assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
2020         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]);
2021         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]);
2022         commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
2023         expect_pending_htlcs_forwardable!(nodes[1]);
2024
2025         let ref payment_event_3 = expect_forward!(nodes[1]);
2026         assert_eq!(payment_event_3.msgs.len(), 2);
2027         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]);
2028         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]);
2029
2030         commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
2031         expect_pending_htlcs_forwardable!(nodes[2]);
2032
2033         let events = nodes[2].node.get_and_clear_pending_events();
2034         assert_eq!(events.len(), 2);
2035         match events[0] {
2036                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
2037                         assert_eq!(our_payment_hash_21, *payment_hash);
2038                         assert_eq!(recv_value_21, amount_msat);
2039                         assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
2040                         assert_eq!(via_channel_id, Some(chan_2.2));
2041                         match &purpose {
2042                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
2043                                         assert!(payment_preimage.is_none());
2044                                         assert_eq!(our_payment_secret_21, *payment_secret);
2045                                 },
2046                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
2047                         }
2048                 },
2049                 _ => panic!("Unexpected event"),
2050         }
2051         match events[1] {
2052                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
2053                         assert_eq!(our_payment_hash_22, *payment_hash);
2054                         assert_eq!(recv_value_22, amount_msat);
2055                         assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
2056                         assert_eq!(via_channel_id, Some(chan_2.2));
2057                         match &purpose {
2058                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
2059                                         assert!(payment_preimage.is_none());
2060                                         assert_eq!(our_payment_secret_22, *payment_secret);
2061                                 },
2062                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
2063                         }
2064                 },
2065                 _ => panic!("Unexpected event"),
2066         }
2067
2068         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
2069         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
2070         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
2071
2072         let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, &channel_type_features);
2073         let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat;
2074         send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3);
2075
2076         let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
2077         let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat);
2078         let stat0 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
2079         assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
2080         assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat + commit_tx_fee_1_htlc);
2081
2082         let stat2 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2);
2083         assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3);
2084 }
2085
2086 #[test]
2087 fn channel_reserve_in_flight_removes() {
2088         // In cases where one side claims an HTLC, it thinks it has additional available funds that it
2089         // can send to its counterparty, but due to update ordering, the other side may not yet have
2090         // considered those HTLCs fully removed.
2091         // This tests that we don't count HTLCs which will not be included in the next remote
2092         // commitment transaction towards the reserve value (as it implies no commitment transaction
2093         // will be generated which violates the remote reserve value).
2094         // This was broken previously, and discovered by the chanmon_fail_consistency fuzz test.
2095         // To test this we:
2096         //  * route two HTLCs from A to B (note that, at a high level, this test is checking that, when
2097         //    you consider the values of both of these HTLCs, B may not send an HTLC back to A, but if
2098         //    you only consider the value of the first HTLC, it may),
2099         //  * start routing a third HTLC from A to B,
2100         //  * claim the first two HTLCs (though B will generate an update_fulfill for one, and put
2101         //    the other claim in its holding cell, as it immediately goes into AwaitingRAA),
2102         //  * deliver the first fulfill from B
2103         //  * deliver the update_add and an RAA from A, resulting in B freeing the second holding cell
2104         //    claim,
2105         //  * deliver A's response CS and RAA.
2106         //    This results in A having the second HTLC in AwaitingRemovedRemoteRevoke, but B having
2107         //    removed it fully. B now has the push_msat plus the first two HTLCs in value.
2108         //  * Now B happily sends another HTLC, potentially violating its reserve value from A's point
2109         //    of view (if A counts the AwaitingRemovedRemoteRevoke HTLC).
2110         let chanmon_cfgs = create_chanmon_cfgs(2);
2111         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2112         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2113         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2114         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2115
2116         let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
2117         // Route the first two HTLCs.
2118         let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000;
2119         let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1);
2120         let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 20_000);
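             // payment_value_1 leaves nodes[1] 10k msat short of its reserve; after it also claims
             // the 20k msat HTLC it holds reserve + 10k msat, i.e. just enough spendable balance
             // for the 10k msat payment it sends back to nodes[0] later in the test.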
2121
2122         // Start routing the third HTLC (this is just used to get everyone in the right state).
2123         let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
2124         let send_1 = {
2125                 nodes[0].node.send_payment_with_route(&route, payment_hash_3,
2126                         RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
2127                 check_added_monitors!(nodes[0], 1);
2128                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2129                 assert_eq!(events.len(), 1);
2130                 SendEvent::from_event(events.remove(0))
2131         };
2132
2133         // Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an
2134         // initial fulfill/CS.
2135         nodes[1].node.claim_funds(payment_preimage_1);
2136         expect_payment_claimed!(nodes[1], payment_hash_1, payment_value_1);
2137         check_added_monitors!(nodes[1], 1);
2138         let bs_removes = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2139
2140         // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not
2141         // remove the second HTLC when we send the HTLC back from B to A.
2142         nodes[1].node.claim_funds(payment_preimage_2);
2143         expect_payment_claimed!(nodes[1], payment_hash_2, 20_000);
2144         check_added_monitors!(nodes[1], 1);
2145         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2146
2147         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]);
2148         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed);
2149         check_added_monitors!(nodes[0], 1);
2150         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2151         expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
2152
2153         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]);
2154         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg);
2155         check_added_monitors!(nodes[1], 1);
2156         // B is already AwaitingRAA, so it can't generate a CS here
2157         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2158
2159         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2160         check_added_monitors!(nodes[1], 1);
2161         let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2162
2163         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2164         check_added_monitors!(nodes[0], 1);
2165         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2166
2167         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2168         check_added_monitors!(nodes[1], 1);
2169         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2170
2171         // The second HTLC is removed, but as A is in AwaitingRAA it can't generate a CS here, so the
2172         // RAA that B generated above doesn't fully resolve the second HTLC from A's point of view.
2173         // However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
2174         // can no longer broadcast a commitment transaction with it and B has the preimage so can go
2175         // on-chain as necessary).
2176         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]);
2177         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
2178         check_added_monitors!(nodes[0], 1);
2179         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2180         expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
2181
2182         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2183         check_added_monitors!(nodes[1], 1);
2184         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2185
2186         expect_pending_htlcs_forwardable!(nodes[1]);
2187         expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 100000);
2188
2189         // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
2190         // resolve the second HTLC from A's point of view.
2191         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2192         check_added_monitors!(nodes[0], 1);
2193         expect_payment_path_successful!(nodes[0]);
2194         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2195
2196         // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
2197         // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing.
2198         let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[1], nodes[0], 10000);
2199         let send_2 = {
2200                 nodes[1].node.send_payment_with_route(&route, payment_hash_4,
2201                         RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap();
2202                 check_added_monitors!(nodes[1], 1);
2203                 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
2204                 assert_eq!(events.len(), 1);
2205                 SendEvent::from_event(events.remove(0))
2206         };
2207
2208         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_2.msgs[0]);
2209         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_2.commitment_msg);
2210         check_added_monitors!(nodes[0], 1);
2211         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2212
2213         // Now just resolve all the outstanding messages/HTLCs for completeness...
2214
2215         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2216         check_added_monitors!(nodes[1], 1);
2217         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2218
2219         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2220         check_added_monitors!(nodes[1], 1);
2221
2222         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2223         check_added_monitors!(nodes[0], 1);
2224         expect_payment_path_successful!(nodes[0]);
2225         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2226
2227         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2228         check_added_monitors!(nodes[1], 1);
2229         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2230
2231         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2232         check_added_monitors!(nodes[0], 1);
2233
2234         expect_pending_htlcs_forwardable!(nodes[0]);
2235         expect_payment_claimable!(nodes[0], payment_hash_4, payment_secret_4, 10000);
2236
2237         claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
2238         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
2239 }
2240
2241 #[test]
2242 fn channel_monitor_network_test() {
2243         // Simple test which builds a network of ChannelManagers, connects them to each other, and
2244         // tests that ChannelMonitor is able to recover from various states.
2245         let chanmon_cfgs = create_chanmon_cfgs(5);
2246         let node_cfgs = create_node_cfgs(5, &chanmon_cfgs);
2247         let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, None, None, None, None]);
2248         let nodes = create_network(5, &node_cfgs, &node_chanmgrs);
2249
2250         // Create some initial channels
2251         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2252         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
2253         let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
2254         let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
2255
2256         // Make sure all nodes are at the same starting height
2257         connect_blocks(&nodes[0], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
2258         connect_blocks(&nodes[1], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
2259         connect_blocks(&nodes[2], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
2260         connect_blocks(&nodes[3], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[3].best_block_info().1);
2261         connect_blocks(&nodes[4], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[4].best_block_info().1);
2262
2263         // Rebalance the network a bit by relaying several payments through all the channels...
2264         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2265         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2266         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2267         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2268
2269         // Simple case with no pending HTLCs:
2270         nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
2271         check_added_monitors!(nodes[1], 1);
2272         check_closed_broadcast!(nodes[1], true);
2273         {
2274                 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
2275                 assert_eq!(node_txn.len(), 1);
2276                 mine_transaction(&nodes[0], &node_txn[0]);
2277                 check_added_monitors!(nodes[0], 1);
2278                 test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE);
2279         }
2280         check_closed_broadcast!(nodes[0], true);
2281         assert_eq!(nodes[0].node.list_channels().len(), 0);
2282         assert_eq!(nodes[1].node.list_channels().len(), 1);
2283         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2284         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
2285
2286         // One pending HTLC is discarded by the force-close:
2287         let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
2288
2289         // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
2290         // broadcast until we reach the timelock time).
2291         nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
2292         check_closed_broadcast!(nodes[1], true);
2293         check_added_monitors!(nodes[1], 1);
2294         {
2295                 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE);
2296                 connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
2297                 test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
2298                 mine_transaction(&nodes[2], &node_txn[0]);
2299                 check_added_monitors!(nodes[2], 1);
2300                 test_txn_broadcast(&nodes[2], &chan_2, Some(node_txn[0].clone()), HTLCType::NONE);
2301         }
2302         check_closed_broadcast!(nodes[2], true);
2303         assert_eq!(nodes[1].node.list_channels().len(), 0);
2304         assert_eq!(nodes[2].node.list_channels().len(), 1);
2305         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
2306         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2307
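             // Helper: claims `$preimage` on `$node`, checks the claimed amount and monitor
             // update, and asserts the claim generates exactly one UpdateHTLCs message for
             // `$prev_node` containing no adds or fails.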
2308         macro_rules! claim_funds {
2309                 ($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => {
2310                         {
2311                                 $node.node.claim_funds($preimage);
2312                                 expect_payment_claimed!($node, $payment_hash, 3_000_000);
2313                                 check_added_monitors!($node, 1);
2314
2315                                 let events = $node.node.get_and_clear_pending_msg_events();
2316                                 assert_eq!(events.len(), 1);
2317                                 match events[0] {
2318                                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => {
2319                                                 assert!(update_add_htlcs.is_empty());
2320                                                 assert!(update_fail_htlcs.is_empty());
2321                                                 assert_eq!(*node_id, $prev_node.node.get_our_node_id());
2322                                         },
2323                                         _ => panic!("Unexpected event"),
2324                                 };
2325                         }
2326                 }
2327         }
2328
2329         // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
2330         // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
2331         nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
2332         check_added_monitors!(nodes[2], 1);
2333         check_closed_broadcast!(nodes[2], true);
2334         let node2_commitment_txid;
2335         {
2336                 let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
2337                 connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
2338                 test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
2339                 node2_commitment_txid = node_txn[0].txid();
2340
2341                 // Claim the payment on nodes[3], giving it knowledge of the preimage
2342                 claim_funds!(nodes[3], nodes[2], payment_preimage_1, payment_hash_1);
2343                 mine_transaction(&nodes[3], &node_txn[0]);
2344                 check_added_monitors!(nodes[3], 1);
2345                 check_preimage_claim(&nodes[3], &node_txn);
2346         }
2347         check_closed_broadcast!(nodes[3], true);
2348         assert_eq!(nodes[2].node.list_channels().len(), 0);
2349         assert_eq!(nodes[3].node.list_channels().len(), 1);
2350         check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
2351         check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
2352
2353         // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
2354         // confusing us in the following tests.
2355         let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&OutPoint { txid: chan_3.3.txid(), index: 0 });
2356
2357         // One pending HTLC to time out:
2358         let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[3], &[&nodes[4]], 3_000_000);
2359         // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
2360         // buffer space).
2361
2362         let (close_chan_update_1, close_chan_update_2) = {
2363                 connect_blocks(&nodes[3], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
2364                 let events = nodes[3].node.get_and_clear_pending_msg_events();
2365                 assert_eq!(events.len(), 2);
2366                 let close_chan_update_1 = match events[0] {
2367                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2368                                 msg.clone()
2369                         },
2370                         _ => panic!("Unexpected event"),
2371                 };
2372                 match events[1] {
2373                         MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
2374                                 assert_eq!(node_id, nodes[4].node.get_our_node_id());
2375                         },
2376                         _ => panic!("Unexpected event"),
2377                 }
2378                 check_added_monitors!(nodes[3], 1);
2379
2380                 // Clear the bumped claiming txn spending the node 2 commitment tx; bumped txn are generated after a height-based timer fires.
2381                 {
2382                         let mut node_txn = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap();
2383                         node_txn.retain(|tx| tx.input[0].previous_output.txid != node2_commitment_txid);
2388                 }
2389
2390                 let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT);
2391
2392                 // Claim the payment on nodes[4], giving it knowledge of the preimage
2393                 claim_funds!(nodes[4], nodes[3], payment_preimage_2, payment_hash_2);
2394
2395                 connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
2396                 let events = nodes[4].node.get_and_clear_pending_msg_events();
2397                 assert_eq!(events.len(), 2);
2398                 let close_chan_update_2 = match events[0] {
2399                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2400                                 msg.clone()
2401                         },
2402                         _ => panic!("Unexpected event"),
2403                 };
2404                 match events[1] {
2405                         MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
2406                                 assert_eq!(node_id, nodes[3].node.get_our_node_id());
2407                         },
2408                         _ => panic!("Unexpected event"),
2409                 }
2410                 check_added_monitors!(nodes[4], 1);
2411                 test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
2412                 check_closed_event!(nodes[4], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
2413
2414                 mine_transaction(&nodes[4], &node_txn[0]);
2415                 check_preimage_claim(&nodes[4], &node_txn);
2416                 (close_chan_update_1, close_chan_update_2)
2417         };
2418         nodes[3].gossip_sync.handle_channel_update(&close_chan_update_2).unwrap();
2419         nodes[4].gossip_sync.handle_channel_update(&close_chan_update_1).unwrap();
2420         assert_eq!(nodes[3].node.list_channels().len(), 0);
2421         assert_eq!(nodes[4].node.list_channels().len(), 0);
2422
2423         assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
2424                 Ok(ChannelMonitorUpdateStatus::Completed));
2425         check_closed_event!(nodes[3], 1, ClosureReason::HolderForceClosed, [nodes[4].node.get_our_node_id()], 100000);
2426 }
2427
2428 #[test]
2429 fn test_justice_tx_htlc_timeout() {
2430         // Test justice txn built on revoked HTLC-Timeout tx, against both sides
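             // Alice asks for a to_self_delay of roughly five days' worth of blocks
             // (6 blocks/hour * 24 * 5), Bob roughly three, so the two sides enforce different
             // revocation delays.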
2431         let mut alice_config = UserConfig::default();
2432         alice_config.channel_handshake_config.announced_channel = true;
2433         alice_config.channel_handshake_limits.force_announced_channel_preference = false;
2434         alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
2435         let mut bob_config = UserConfig::default();
2436         bob_config.channel_handshake_config.announced_channel = true;
2437         bob_config.channel_handshake_limits.force_announced_channel_preference = false;
2438         bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
2439         let user_cfgs = [Some(alice_config), Some(bob_config)];
2440         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2441         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2442         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
2443         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2444         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
2445         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2446         // Create some new channels:
2447         let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1);
2448
2449         // A pending HTLC which will be revoked:
2450         let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2451         // Get the will-be-revoked local txn from nodes[0]
2452         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_5.2);
2453         assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
2454         assert_eq!(revoked_local_txn[0].input.len(), 1);
2455         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
2456         assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to 0 are present
2457         assert_eq!(revoked_local_txn[1].input.len(), 1);
2458         assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2459         assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
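             // The last witness element is the HTLC redeemscript; checking its length against
             // OFFERED_HTLC_SCRIPT_WEIGHT confirms this spends an offered HTLC via HTLC-Timeout.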
2460         // Revoke the old state
2461         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
2462
2463         {
2464                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2465                 {
2466                         let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
2467                         assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2468                         assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output
2469                         check_spends!(node_txn[0], revoked_local_txn[0]);
2470                         node_txn.swap_remove(0);
2471                 }
2472                 check_added_monitors!(nodes[1], 1);
2473                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2474                 test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
2475
2476                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2477                 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
2478                 // Verify broadcast of revoked HTLC-timeout
2479                 let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
2480                 check_added_monitors!(nodes[0], 1);
2481                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2482                 // Broadcast revoked HTLC-timeout on node 1
2483                 mine_transaction(&nodes[1], &node_txn[1]);
2484                 test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
2485         }
2486         get_announce_close_broadcast_events(&nodes, 0, 1);
2487         assert_eq!(nodes[0].node.list_channels().len(), 0);
2488         assert_eq!(nodes[1].node.list_channels().len(), 0);
2489 }
2490
2491 #[test]
2492 fn test_justice_tx_htlc_success() {
2493         // Test justice txn built on revoked HTLC-Success tx, against both sides
2494         let mut alice_config = UserConfig::default();
2495         alice_config.channel_handshake_config.announced_channel = true;
2496         alice_config.channel_handshake_limits.force_announced_channel_preference = false;
2497         alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
2498         let mut bob_config = UserConfig::default();
2499         bob_config.channel_handshake_config.announced_channel = true;
2500         bob_config.channel_handshake_limits.force_announced_channel_preference = false;
2501         bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
2502         let user_cfgs = [Some(alice_config), Some(bob_config)];
2503         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2504         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2505         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
2506         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2507         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
2508         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2509         // Create some new channels:
2510         let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1);
2511
2512         // A pending HTLC which will be revoked:
2513         let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2514         // Get the will-be-revoked local txn from B
2515         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_6.2);
2516         assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx
2517         assert_eq!(revoked_local_txn[0].input.len(), 1);
2518         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.txid());
2519         assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to A are present
2520         // Revoke the old state
2521         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4);
2522         {
2523                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2524                 {
2525                         let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
2526                         assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2527                         assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output
2528
2529                         check_spends!(node_txn[0], revoked_local_txn[0]);
2530                         node_txn.swap_remove(0);
2531                 }
2532                 check_added_monitors!(nodes[0], 1);
2533                 test_txn_broadcast(&nodes[0], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
2534
2535                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2536                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2537                 let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
2538                 check_added_monitors!(nodes[1], 1);
2539                 mine_transaction(&nodes[0], &node_txn[1]);
2540                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2541                 test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
2542         }
2543         get_announce_close_broadcast_events(&nodes, 0, 1);
2544         assert_eq!(nodes[0].node.list_channels().len(), 0);
2545         assert_eq!(nodes[1].node.list_channels().len(), 0);
2546 }
2547
2548 #[test]
2549 fn revoked_output_claim() {
2550         // Simple test to ensure a node will claim a revoked output when a stale remote commitment
2551         // transaction is broadcast by its counterparty
2552         let chanmon_cfgs = create_chanmon_cfgs(2);
2553         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2554         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2555         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2556         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2557         // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim the revoked output
2558         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2559         assert_eq!(revoked_local_txn.len(), 1);
2560         // Only output is the full channel value back to nodes[0]:
2561         assert_eq!(revoked_local_txn[0].output.len(), 1);
2562         // Send a payment through, updating everyone's latest commitment txn
2563         send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);
2564
2565         // Inform nodes[1] that nodes[0] broadcast a stale tx
2566         mine_transaction(&nodes[1], &revoked_local_txn[0]);
2567         check_added_monitors!(nodes[1], 1);
2568         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2569         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
2570         assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output
2571
2572         check_spends!(node_txn[0], revoked_local_txn[0]);
2573
2574         // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
2575         mine_transaction(&nodes[0], &revoked_local_txn[0]);
2576         get_announce_close_broadcast_events(&nodes, 0, 1);
2577         check_added_monitors!(nodes[0], 1);
2578         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2579 }
2580
2581 #[test]
2582 fn test_forming_justice_tx_from_monitor_updates() {
2583         do_test_forming_justice_tx_from_monitor_updates(true);
2584         do_test_forming_justice_tx_from_monitor_updates(false);
2585 }
2586
2587 fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment: bool) {
2588         // Simple test to make sure that the justice tx formed in WatchtowerPersister
2589         // is properly formed and can be broadcast/confirmed successfully in the event
2590         // that a revoked commitment transaction is broadcast
2591         // (similar to the `revoked_output_claim` test, but we fetch the justice tx and broadcast it manually)
2592         let chanmon_cfgs = create_chanmon_cfgs(2);
2593         let destination_script0 = chanmon_cfgs[0].keys_manager.get_destination_script().unwrap();
2594         let destination_script1 = chanmon_cfgs[1].keys_manager.get_destination_script().unwrap();
2595         let persisters = vec![WatchtowerPersister::new(destination_script0),
2596                 WatchtowerPersister::new(destination_script1)];
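             // WatchtowerPersister is a test-only persister which, in addition to normal
             // persistence, builds and stores a signed justice transaction for each counterparty
             // commitment it sees, retrievable below via `justice_tx()`.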
2597         let node_cfgs = create_node_cfgs_with_persisters(2, &chanmon_cfgs, persisters.iter().collect());
2598         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2599         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2600         let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
2601         let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
2602
2603         if !broadcast_initial_commitment {
2604                 // Send a payment to move the channel forward
2605                 send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000);
2606         }
2607
2608         // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim the revoked output.
2609         // We'll keep this commitment transaction to broadcast once it's revoked.
2610         let revoked_local_txn = get_local_commitment_txn!(nodes[0], channel_id);
2611         assert_eq!(revoked_local_txn.len(), 1);
2612         let revoked_commitment_tx = &revoked_local_txn[0];
2613
2614         // Send another payment, now revoking the previous commitment tx
2615         send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000);
2616
2617         let justice_tx = persisters[1].justice_tx(funding_txo, &revoked_commitment_tx.txid()).unwrap();
2618         check_spends!(justice_tx, revoked_commitment_tx);
2619
2620         mine_transactions(&nodes[1], &[revoked_commitment_tx, &justice_tx]);
2621         mine_transactions(&nodes[0], &[revoked_commitment_tx, &justice_tx]);
2622
2623         check_added_monitors!(nodes[1], 1);
2624         check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
2625                 &[nodes[0].node.get_our_node_id()], 100_000);
2626         get_announce_close_broadcast_events(&nodes, 1, 0);
2627
2628         check_added_monitors!(nodes[0], 1);
2629         check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false,
2630                 &[nodes[1].node.get_our_node_id()], 100_000);
2631
2632         // Check that the justice tx has sent the revoked output value to nodes[1]
2633         let monitor = get_monitor!(nodes[1], channel_id);
2634         let total_claimable_balance = monitor.get_claimable_balances().iter().fold(0, |sum, balance| {
2635                 match balance {
2636                         channelmonitor::Balance::ClaimableAwaitingConfirmations { amount_satoshis, .. } => sum + amount_satoshis,
2637                         _ => panic!("Unexpected balance type"),
2638                 }
2639         });
2640         // On the first commitment, nodes[1]'s balance was below dust, so it didn't have an output
2641         let node1_channel_balance = if broadcast_initial_commitment { 0 } else { revoked_commitment_tx.output[0].value };
2642         let expected_claimable_balance = node1_channel_balance + justice_tx.output[0].value;
2643         assert_eq!(total_claimable_balance, expected_claimable_balance);
2644 }
2645
2646
2647 #[test]
2648 fn claim_htlc_outputs_shared_tx() {
2649         // A node revoked its old state; the HTLCs haven't timed out yet, so we claim them in a shared justice tx
2650         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2651         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2652         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2653         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2654         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2655
2656         // Create some new channel:
2657         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2658
2659         // Rebalance the network so we can generate HTLCs in both directions
2660         send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
2661         // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx
2662         let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
2663         let (_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
2664
2665         // Get the will-be-revoked local txn from node[0]
2666         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2667         assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
2668         assert_eq!(revoked_local_txn[0].input.len(), 1);
2669         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
2670         assert_eq!(revoked_local_txn[1].input.len(), 1);
2671         assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2672         assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
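        // The last witness element of a P2WSH spend is the witness script itself, so its
        // length tells us which script is being spent; these tests rely on the
        // OFFERED_HTLC_SCRIPT_WEIGHT/ACCEPTED_HTLC_SCRIPT_WEIGHT constants doubling as the
        // expected witness-script lengths.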
2673         check_spends!(revoked_local_txn[1], revoked_local_txn[0]);
2674
2675         // Revoke the old state
2676         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
2677
2678         {
2679                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2680                 check_added_monitors!(nodes[0], 1);
2681                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2682                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2683                 check_added_monitors!(nodes[1], 1);
2684                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2685                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2686                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
2687
2688                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
2689                 assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2690
2691                 assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
2692                 check_spends!(node_txn[0], revoked_local_txn[0]);
2693
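                // BTreeSet iterates in ascending key order, so collecting the three distinct
                // witness-script lengths and reading them back sorted identifies each claim
                // type without depending on input ordering: the revoked to_local script
                // (77 bytes) is shorter than the offered-HTLC script, which is shorter than
                // the accepted-HTLC script.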
2694                 let mut witness_lens = BTreeSet::new();
2695                 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
2696                 witness_lens.insert(node_txn[0].input[1].witness.last().unwrap().len());
2697                 witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len());
2698                 assert_eq!(witness_lens.len(), 3);
2699                 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2700                 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2701                 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2702
2703                 // Finally, mine the penalty transaction and check that we get an HTLC failure after
2704                 // ANTI_REORG_DELAY confirmations.
2705                 mine_transaction(&nodes[1], &node_txn[0]);
2706                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2707                 expect_payment_failed!(nodes[1], payment_hash_2, false);
2708         }
2709         get_announce_close_broadcast_events(&nodes, 0, 1);
2710         assert_eq!(nodes[0].node.list_channels().len(), 0);
2711         assert_eq!(nodes[1].node.list_channels().len(), 0);
2712 }
2713
2714 #[test]
2715 fn claim_htlc_outputs_single_tx() {
2716         // Node revoked its old state, HTLCs have timed out, claim each of them in a separate justice tx
2717         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2718         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2719         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2720         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2721         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2722
2723         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2724
2725         // Rebalance the network to generate HTLCs in both directions
2726         send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
2727         // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both the offered and received HTLC outputs on top of the commitment tx, but this
2728         // time as two separate claim transactions, as we're going to time out the HTLCs given a high current height
2729         let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
2730         let (_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
2731
2732         // Get the will-be-revoked local txn from node[0]
2733         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2734
2735         // Revoke the old state
2736         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
2737
2738         {
2739                 confirm_transaction_at(&nodes[0], &revoked_local_txn[0], 100);
2740                 check_added_monitors!(nodes[0], 1);
2741                 confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
2742                 check_added_monitors!(nodes[1], 1);
2743                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2744                 let mut events = nodes[0].node.get_and_clear_pending_events();
2745                 expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
2746                 match events.last().unwrap() {
2747                         Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
2748                         _ => panic!("Unexpected event"),
2749                 }
2750
2751                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2752                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
2753
2754                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcast();
2755
2756         // Check the pair of local commitment and HTLC-timeout transactions broadcast due to HTLC expiration
2757                 assert_eq!(node_txn[0].input.len(), 1);
2758                 check_spends!(node_txn[0], chan_1.3);
2759                 assert_eq!(node_txn[1].input.len(), 1);
2760                 let witness_script = node_txn[1].input[0].witness.last().unwrap();
2761                 assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); //Spending an offered htlc output
2762                 check_spends!(node_txn[1], node_txn[0]);
2763
2764         // Filter out any non-justice transactions.
2765                 node_txn.retain(|tx| tx.input[0].previous_output.txid == revoked_local_txn[0].txid());
2766                 assert!(node_txn.len() > 3);
2767
2768                 assert_eq!(node_txn[0].input.len(), 1);
2769                 assert_eq!(node_txn[1].input.len(), 1);
2770                 assert_eq!(node_txn[2].input.len(), 1);
2771
2772                 check_spends!(node_txn[0], revoked_local_txn[0]);
2773                 check_spends!(node_txn[1], revoked_local_txn[0]);
2774                 check_spends!(node_txn[2], revoked_local_txn[0]);
2775
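                // Same BTreeSet ordering trick as in claim_htlc_outputs_shared_tx above: the
                // sorted witness-script lengths identify the to_local, offered-HTLC, and
                // accepted-HTLC claims regardless of broadcast order.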
2776                 let mut witness_lens = BTreeSet::new();
2777                 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
2778                 witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len());
2779                 witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
2780                 assert_eq!(witness_lens.len(), 3);
2781                 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2782                 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2783                 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2784
2785                 // Finally, mine the penalty transactions and check that we get an HTLC failure after
2786                 // ANTI_REORG_DELAY confirmations.
2787                 mine_transaction(&nodes[1], &node_txn[0]);
2788                 mine_transaction(&nodes[1], &node_txn[1]);
2789                 mine_transaction(&nodes[1], &node_txn[2]);
2790                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2791                 expect_payment_failed!(nodes[1], payment_hash_2, false);
2792         }
2793         get_announce_close_broadcast_events(&nodes, 0, 1);
2794         assert_eq!(nodes[0].node.list_channels().len(), 0);
2795         assert_eq!(nodes[1].node.list_channels().len(), 0);
2796 }
2797
2798 #[test]
2799 fn test_htlc_on_chain_success() {
2800         // Test that in case of a unilateral close on-chain, we detect the state of the output and
2801         // pass the preimage backward accordingly. So here we test that ChannelManager is
2802         // broadcasting the right event to the other nodes in the payment path.
2803         // We test with two HTLCs simultaneously as that was not handled correctly in the past.
2804         // A --------------------> B ----------------------> C (preimage)
2805         // First, C should claim the HTLC outputs via HTLC-Success when its own latest local
2806         // commitment transaction is broadcast.
2807         // Then, B should learn the preimage from said transactions, attempting to claim backwards
2808         // towards A.
2809         // B should be able to claim via the preimage if A then broadcasts its local tx.
2810         // Finally, when A sees B's preimage claim of the HTLC outputs on A's own commitment
2811         // transaction, it should extract the preimages (which, once confirmed, should generate
2812         // PaymentSent events).
2813
2814         let chanmon_cfgs = create_chanmon_cfgs(3);
2815         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2816         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2817         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2818
2819         // Create some initial channels
2820         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2821         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
2822
2823         // Ensure all nodes are at the same height
2824         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
2825         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
2826         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
2827         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
2828
2829         // Rebalance the network a bit by relaying one payment through all the channels...
2830         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2831         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2832
2833         let (our_payment_preimage, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
2834         let (our_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
2835
2836         // Broadcast legit commitment tx from C on B's chain
2837         // Broadcast HTLC-Success transaction by C on the received output from C's commitment tx on B's chain
2838         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
2839         assert_eq!(commitment_tx.len(), 1);
2840         check_spends!(commitment_tx[0], chan_2.3);
2841         nodes[2].node.claim_funds(our_payment_preimage);
2842         expect_payment_claimed!(nodes[2], payment_hash_1, 3_000_000);
2843         nodes[2].node.claim_funds(our_payment_preimage_2);
2844         expect_payment_claimed!(nodes[2], payment_hash_2, 3_000_000);
2845         check_added_monitors!(nodes[2], 2);
2846         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2847         assert!(updates.update_add_htlcs.is_empty());
2848         assert!(updates.update_fail_htlcs.is_empty());
2849         assert!(updates.update_fail_malformed_htlcs.is_empty());
2850         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2851
2852         mine_transaction(&nodes[2], &commitment_tx[0]);
2853         check_closed_broadcast!(nodes[2], true);
2854         check_added_monitors!(nodes[2], 1);
2855         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2856         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 2 (2 * HTLC-Success tx)
2857         assert_eq!(node_txn.len(), 2);
2858         check_spends!(node_txn[0], commitment_tx[0]);
2859         check_spends!(node_txn[1], commitment_tx[0]);
2860         assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2861         assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2862         assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2863         assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2864         assert_eq!(node_txn[0].lock_time, LockTime::ZERO);
2865         assert_eq!(node_txn[1].lock_time, LockTime::ZERO);
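        // HTLC-Success claims carry no height lock (lock_time of zero) since the claimer
        // already holds the preimage. Timeout claims, by contrast, must wait for the HTLC's
        // CLTV expiry, which is why check_tx_local_broadcast! below asserts a non-zero
        // lock_time on the HTLC-timeout transactions.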
2866
2867         // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
2868         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()]));
2869         connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
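        // nodes[1] should now have extracted the payment preimages from the witnesses of
        // C's HTLC-Success transactions seen on-chain, letting it claim the HTLCs backwards
        // towards A (surfaced as the PaymentForwarded events checked below).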
2870         {
2871                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2872                 assert_eq!(added_monitors.len(), 1);
2873                 assert_eq!(added_monitors[0].0.txid, chan_2.3.txid());
2874                 added_monitors.clear();
2875         }
2876         let forwarded_events = nodes[1].node.get_and_clear_pending_events();
2877         assert_eq!(forwarded_events.len(), 3);
2878         match forwarded_events[0] {
2879                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
2880                 _ => panic!("Unexpected event"),
2881         }
2882         let chan_id = Some(chan_1.2);
2883         match forwarded_events[1] {
2884                 Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
2885                         assert_eq!(fee_earned_msat, Some(1000));
2886                         assert_eq!(prev_channel_id, chan_id);
2887                         assert_eq!(claim_from_onchain_tx, true);
2888                         assert_eq!(next_channel_id, Some(chan_2.2));
2889                         assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
2890                 },
2891                 _ => panic!()
2892         }
2893         match forwarded_events[2] {
2894                 Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
2895                         assert_eq!(fee_earned_msat, Some(1000));
2896                         assert_eq!(prev_channel_id, chan_id);
2897                         assert_eq!(claim_from_onchain_tx, true);
2898                         assert_eq!(next_channel_id, Some(chan_2.2));
2899                         assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
2900                 },
2901                 _ => panic!()
2902         }
2903         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
2904         {
2905                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2906                 assert_eq!(added_monitors.len(), 2);
2907                 assert_eq!(added_monitors[0].0.txid, chan_1.3.txid());
2908                 assert_eq!(added_monitors[1].0.txid, chan_1.3.txid());
2909                 added_monitors.clear();
2910         }
2911         assert_eq!(events.len(), 3);
2912
2913         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
2914         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
2915
2916         match nodes_2_event {
2917                 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {},
2918                 _ => panic!("Unexpected event"),
2919         }
2920
2921         match nodes_0_event {
2922                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
2923                         assert!(update_add_htlcs.is_empty());
2924                         assert!(update_fail_htlcs.is_empty());
2925                         assert_eq!(update_fulfill_htlcs.len(), 1);
2926                         assert!(update_fail_malformed_htlcs.is_empty());
2927                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
2928                 },
2929                 _ => panic!("Unexpected event"),
2930         };
2931
2932         // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
2933         match events[0] {
2934                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
2935                 _ => panic!("Unexpected event"),
2936         }
2937
2938         macro_rules! check_tx_local_broadcast {
2939                 ($node: expr, $htlc_offered: expr, $commitment_tx: expr) => { {
2940                         let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap();
2941                         assert_eq!(node_txn.len(), 2);
2942                         // Node[1]: 2 * HTLC-timeout tx
2943                         // Node[0]: 2 * HTLC-timeout tx
2944                         check_spends!(node_txn[0], $commitment_tx);
2945                         check_spends!(node_txn[1], $commitment_tx);
2946                         assert_ne!(node_txn[0].lock_time, LockTime::ZERO);
2947                         assert_ne!(node_txn[1].lock_time, LockTime::ZERO);
2948                         if $htlc_offered {
2949                                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2950                                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2951                                 assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2952                                 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2953                         } else {
2954                                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2955                                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2956                                 assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
2957                                 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
2958                         }
2959                         node_txn.clear();
2960                 } }
2961         }
2962         // nodes[1] now broadcasts its own timeout-claim of the output that nodes[2] just claimed via success.
2963         check_tx_local_broadcast!(nodes[1], false, commitment_tx[0]);
2964
2965         // Broadcast legit commitment tx from A on B's chain
2966         // Broadcast preimage tx by B on the offered output from A's commitment tx on A's chain
2967         let node_a_commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
2968         check_spends!(node_a_commitment_tx[0], chan_1.3);
2969         mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
2970         check_closed_broadcast!(nodes[1], true);
2971         check_added_monitors!(nodes[1], 1);
2972         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2973         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
2974         assert!(node_txn.len() == 1 || node_txn.len() == 3); // HTLC-Success, 2* RBF bumps of above HTLC txn
2975         let commitment_spend =
2976                 if node_txn.len() == 1 {
2977                         &node_txn[0]
2978                 } else {
2979                         // Certain `ConnectStyle`s will cause RBF bumps of the previous HTLC transaction to be broadcast.
2980                         // FullBlockViaListen
2981                         if node_txn[0].input[0].previous_output.txid == node_a_commitment_tx[0].txid() {
2982                                 check_spends!(node_txn[1], commitment_tx[0]);
2983                                 check_spends!(node_txn[2], commitment_tx[0]);
2984                                 assert_ne!(node_txn[1].input[0].previous_output.vout, node_txn[2].input[0].previous_output.vout);
2985                                 &node_txn[0]
2986                         } else {
2987                                 check_spends!(node_txn[0], commitment_tx[0]);
2988                                 check_spends!(node_txn[1], commitment_tx[0]);
2989                                 assert_ne!(node_txn[0].input[0].previous_output.vout, node_txn[1].input[0].previous_output.vout);
2990                                 &node_txn[2]
2991                         }
2992                 };
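        // Whichever branch we took, commitment_spend must be B's aggregated preimage claim
        // of both HTLC outputs on A's commitment transaction, verified below.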
2993
2994         check_spends!(commitment_spend, node_a_commitment_tx[0]);
2995         assert_eq!(commitment_spend.input.len(), 2);
2996         assert_eq!(commitment_spend.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2997         assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2998         assert_eq!(commitment_spend.lock_time.to_consensus_u32(), nodes[1].best_block_info().1);
2999         assert!(commitment_spend.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
3000         // We don't bother to check that B can claim the HTLC output on its commitment tx here as
3001         // we already checked the same situation with A.
3002
3003         // Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
3004         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()]));
3005         connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
3006         check_closed_broadcast!(nodes[0], true);
3007         check_added_monitors!(nodes[0], 1);
3008         let events = nodes[0].node.get_and_clear_pending_events();
3009         assert_eq!(events.len(), 5);
3010         let mut first_claimed = false;
3011         for event in events {
3012                 match event {
3013                         Event::PaymentSent { payment_preimage, payment_hash, .. } => {
3014                                 if payment_preimage == our_payment_preimage && payment_hash == payment_hash_1 {
3015                                         assert!(!first_claimed);
3016                                         first_claimed = true;
3017                                 } else {
3018                                         assert_eq!(payment_preimage, our_payment_preimage_2);
3019                                         assert_eq!(payment_hash, payment_hash_2);
3020                                 }
3021                         },
3022                         Event::PaymentPathSuccessful { .. } => {},
3023                         Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
3024                         _ => panic!("Unexpected event"),
3025                 }
3026         }
3027         check_tx_local_broadcast!(nodes[0], true, node_a_commitment_tx[0]);
3028 }
3029
3030 fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
3031         // Test that in case of a unilateral close on-chain, we detect the state of the output and
3032         // time out the HTLC backward accordingly. So here we test that ChannelManager is
3033         // broadcasting the right event to the other nodes in the payment path.
3034         // A ------------------> B ----------------------> C (timeout)
3035         //    B's commitment tx                 C's commitment tx
3036         //            \                                  \
3037         //         B's HTLC timeout tx               B's timeout tx
3038
3039         let chanmon_cfgs = create_chanmon_cfgs(3);
3040         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3041         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3042         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3043         *nodes[0].connect_style.borrow_mut() = connect_style;
3044         *nodes[1].connect_style.borrow_mut() = connect_style;
3045         *nodes[2].connect_style.borrow_mut() = connect_style;
3046
3047         // Create some initial channels
3048         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
3049         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3050
3051         // Rebalance the network a bit by relaying one payment through all the channels...
3052         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
3053         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
3054
3055         let (_payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
3056
3057         // Broadcast legit commitment tx from C on B's chain
3058         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
3059         check_spends!(commitment_tx[0], chan_2.3);
3060         nodes[2].node.fail_htlc_backwards(&payment_hash);
3061         check_added_monitors!(nodes[2], 0);
3062         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
3063         check_added_monitors!(nodes[2], 1);
3064
3065         let events = nodes[2].node.get_and_clear_pending_msg_events();
3066         assert_eq!(events.len(), 1);
3067         match events[0] {
3068                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3069                         assert!(update_add_htlcs.is_empty());
3070                         assert!(!update_fail_htlcs.is_empty());
3071                         assert!(update_fulfill_htlcs.is_empty());
3072                         assert!(update_fail_malformed_htlcs.is_empty());
3073                         assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
3074                 },
3075                 _ => panic!("Unexpected event"),
3076         };
3077         mine_transaction(&nodes[2], &commitment_tx[0]);
3078         check_closed_broadcast!(nodes[2], true);
3079         check_added_monitors!(nodes[2], 1);
3080         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
3081         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
3082         assert_eq!(node_txn.len(), 0);
3083
3084         // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
3085         // Verify that B's ChannelManager is able to detect that the HTLC has been timed out by its own tx and react backward in consequence
3086         mine_transaction(&nodes[1], &commitment_tx[0]);
3087         check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
3088                 [nodes[2].node.get_our_node_id()], 100000);
3089         connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
3090         let timeout_tx = {
3091                 let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
3092                 if nodes[1].connect_style.borrow().skips_blocks() {
3093                         assert_eq!(txn.len(), 1);
3094                 } else {
3095                         assert_eq!(txn.len(), 3); // Two extra fee bumps for timeout transaction
3096                 }
3097                 txn.iter().for_each(|tx| check_spends!(tx, commitment_tx[0]));
3098                 assert_eq!(txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3099                 txn.remove(0)
3100         };
3101
3102         mine_transaction(&nodes[1], &timeout_tx);
3103         check_added_monitors!(nodes[1], 1);
3104         check_closed_broadcast!(nodes[1], true);
3105
3106         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3107
3108         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
3109         check_added_monitors!(nodes[1], 1);
3110         let events = nodes[1].node.get_and_clear_pending_msg_events();
3111         assert_eq!(events.len(), 1);
3112         match events[0] {
3113                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3114                         assert!(update_add_htlcs.is_empty());
3115                         assert!(!update_fail_htlcs.is_empty());
3116                         assert!(update_fulfill_htlcs.is_empty());
3117                         assert!(update_fail_malformed_htlcs.is_empty());
3118                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3119                 },
3120                 _ => panic!("Unexpected event"),
3121         };
3122
3123         // Broadcast legit commitment tx from B on A's chain
3124         let commitment_tx = get_local_commitment_txn!(nodes[1], chan_1.2);
3125         check_spends!(commitment_tx[0], chan_1.3);
3126
3127         mine_transaction(&nodes[0], &commitment_tx[0]);
3128         connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
3129
3130         check_closed_broadcast!(nodes[0], true);
3131         check_added_monitors!(nodes[0], 1);
3132         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
3133         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx
3134         assert_eq!(node_txn.len(), 1);
3135         check_spends!(node_txn[0], commitment_tx[0]);
3136         assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3137 }
3138
3139 #[test]
3140 fn test_htlc_on_chain_timeout() {
3141         do_test_htlc_on_chain_timeout(ConnectStyle::BestBlockFirstSkippingBlocks);
3142         do_test_htlc_on_chain_timeout(ConnectStyle::TransactionsFirstSkippingBlocks);
3143         do_test_htlc_on_chain_timeout(ConnectStyle::FullBlockViaListen);
3144 }
3145
3146 #[test]
3147 fn test_simple_commitment_revoked_fail_backward() {
3148         // Test that in case of a revoked commitment tx, we detect the resolution of the output by the justice tx
3149         // and fail backward accordingly.
3150
3151         let chanmon_cfgs = create_chanmon_cfgs(3);
3152         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3153         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3154         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3155
3156         // Create some initial channels
3157         create_announced_chan_between_nodes(&nodes, 0, 1);
3158         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3159
3160         let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3161         // Get the will-be-revoked local txn from nodes[2]
3162         let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3163         // Revoke the old state
3164         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3165
3166         let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3167
3168         mine_transaction(&nodes[1], &revoked_local_txn[0]);
3169         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
3170         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3171         check_added_monitors!(nodes[1], 1);
3172         check_closed_broadcast!(nodes[1], true);
3173
3174         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
3175         check_added_monitors!(nodes[1], 1);
3176         let events = nodes[1].node.get_and_clear_pending_msg_events();
3177         assert_eq!(events.len(), 1);
3178         match events[0] {
3179                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3180                         assert!(update_add_htlcs.is_empty());
3181                         assert_eq!(update_fail_htlcs.len(), 1);
3182                         assert!(update_fulfill_htlcs.is_empty());
3183                         assert!(update_fail_malformed_htlcs.is_empty());
3184                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3185
3186                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3187                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3188                         expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_2.0.contents.short_channel_id, true);
3189                 },
3190                 _ => panic!("Unexpected event"),
3191         }
3192 }
3193
3194 fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use_dust: bool, no_to_remote: bool) {
3195         // Test that if our counterparty broadcasts a revoked commitment transaction we fail all
3196         // pending HTLCs on that channel backwards even if the HTLCs aren't present in our latest
3197         // commitment transaction anymore.
3198         // To do this, we have the peer which will broadcast a revoked commitment transaction send
3199         // a number of update_fail/commitment_signed updates without ever sending the RAA in
3200         // response to our commitment_signed. This is somewhat misbehavior-y, though not
3201         // technically disallowed and we should probably handle it reasonably.
3202         // Note that this is pretty exhaustive as an outbound HTLC which we haven't yet
3203         // failed/fulfilled backwards must be in at least one of the latest two remote commitment
3204         // transactions:
3205         // * Once we move it out of our holding cell/add it, we will immediately include it in a
3206         //   commitment_signed (implying it will be in the latest remote commitment transaction).
3207         // * Once they remove it, we will send a (the first) commitment_signed without the HTLC,
3208         //   and once they revoke the previous commitment transaction (allowing us to send a new
3209         //   commitment_signed) we will be free to fail/fulfill the HTLC backwards.
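        // Rough shape of the exchange below (a sketch, not the exact message order): for
        // each of three HTLCs, nodes[2] sends update_fail_htlc + commitment_signed and
        // nodes[1] responds with its RAA, but nodes[2]'s first revoke_and_ack back to
        // nodes[1] (bs_raa) is withheld. By the third round, the first failed HTLC is no
        // longer in either of the latest two commitment transactions nodes[1] is tracking.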
3210         let chanmon_cfgs = create_chanmon_cfgs(3);
3211         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3212         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3213         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3214
3215         // Create some initial channels
3216         create_announced_chan_between_nodes(&nodes, 0, 1);
3217         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3218
3219         let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
3220         // Get the will-be-revoked local txn from nodes[2]
3221         let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3222         assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 });
3223         // Revoke the old state
3224         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3225
3226         let value = if use_dust {
3227                 // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
3228                 // well, so HTLCs at exactly the dust limit will not be included in commitment txn.
3229                 nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id())
3230                         .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context().holder_dust_limit_satoshis * 1000
3231         } else { 3000000 };
3232
3233         let (_, first_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3234         let (_, second_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3235         let (_, third_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3236
3237         nodes[2].node.fail_htlc_backwards(&first_payment_hash);
3238         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
3239         check_added_monitors!(nodes[2], 1);
3240         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3241         assert!(updates.update_add_htlcs.is_empty());
3242         assert!(updates.update_fulfill_htlcs.is_empty());
3243         assert!(updates.update_fail_malformed_htlcs.is_empty());
3244         assert_eq!(updates.update_fail_htlcs.len(), 1);
3245         assert!(updates.update_fee.is_none());
3246         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3247         let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
3248         // Drop the last RAA from nodes[2] -> nodes[1]
3249
3250         nodes[2].node.fail_htlc_backwards(&second_payment_hash);
3251         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: second_payment_hash }]);
3252         check_added_monitors!(nodes[2], 1);
3253         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3254         assert!(updates.update_add_htlcs.is_empty());
3255         assert!(updates.update_fulfill_htlcs.is_empty());
3256         assert!(updates.update_fail_malformed_htlcs.is_empty());
3257         assert_eq!(updates.update_fail_htlcs.len(), 1);
3258         assert!(updates.update_fee.is_none());
3259         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3260         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3261         check_added_monitors!(nodes[1], 1);
3262         // Note that nodes[1] is in AwaitingRAA, so won't send a CS
3263         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3264         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3265         check_added_monitors!(nodes[2], 1);
3266
3267         nodes[2].node.fail_htlc_backwards(&third_payment_hash);
3268         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: third_payment_hash }]);
3269         check_added_monitors!(nodes[2], 1);
3270         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3271         assert!(updates.update_add_htlcs.is_empty());
3272         assert!(updates.update_fulfill_htlcs.is_empty());
3273         assert!(updates.update_fail_malformed_htlcs.is_empty());
3274         assert_eq!(updates.update_fail_htlcs.len(), 1);
3275         assert!(updates.update_fee.is_none());
3276         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3277         // At this point first_payment_hash has dropped out of the latest two commitment
3278         // transactions that nodes[1] is tracking...
3279         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3280         check_added_monitors!(nodes[1], 1);
3281         // Note that nodes[1] is (still) in AwaitingRAA, so won't send a CS
3282         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3283         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3284         check_added_monitors!(nodes[2], 1);
3285
3286         // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting
3287         // on nodes[2]'s RAA.
3288         let (route, fourth_payment_hash, _, fourth_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 1000000);
3289         nodes[1].node.send_payment_with_route(&route, fourth_payment_hash,
3290                 RecipientOnionFields::secret_only(fourth_payment_secret), PaymentId(fourth_payment_hash.0)).unwrap();
3291         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3292         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3293         check_added_monitors!(nodes[1], 0);
3294
3295         if deliver_bs_raa {
3296                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_raa);
3297         // One monitor for the new revocation preimage, no second one as we won't generate a new
3298                 // commitment transaction for nodes[0] until process_pending_htlc_forwards().
3299                 check_added_monitors!(nodes[1], 1);
3300                 let events = nodes[1].node.get_and_clear_pending_events();
3301                 assert_eq!(events.len(), 2);
3302                 match events[0] {
3303                         Event::PendingHTLCsForwardable { .. } => { },
3304                         _ => panic!("Unexpected event"),
3305                 };
3306                 match events[1] {
3307                         Event::HTLCHandlingFailed { .. } => { },
3308                         _ => panic!("Unexpected event"),
3309                 }
3310                 // Deliberately don't process the pending fail-back so they all fail back at once after
3311                 // block connection just like the !deliver_bs_raa case
3312         }
3313
3314         let mut failed_htlcs = HashSet::new();
3315         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3316
3317         mine_transaction(&nodes[1], &revoked_local_txn[0]);
3318         check_added_monitors!(nodes[1], 1);
3319         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3320
3321         let events = nodes[1].node.get_and_clear_pending_events();
3322         assert_eq!(events.len(), if deliver_bs_raa { 3 + nodes.len() - 1 } else { 4 + nodes.len() });
3323         match events[0] {
3324                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
3325                 _ => panic!("Unexepected event"),
3326         }
3327         match events[1] {
3328                 Event::PaymentPathFailed { ref payment_hash, .. } => {
3329                         assert_eq!(*payment_hash, fourth_payment_hash);
3330                 },
3331                 _ => panic!("Unexpected event"),
3332         }
3333         match events[2] {
3334                 Event::PaymentFailed { ref payment_hash, .. } => {
3335                         assert_eq!(*payment_hash, fourth_payment_hash);
3336                 },
3337                 _ => panic!("Unexpected event"),
3338         }
3339
3340         nodes[1].node.process_pending_htlc_forwards();
3341         check_added_monitors!(nodes[1], 1);
3342
3343         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
3344         assert_eq!(events.len(), if deliver_bs_raa { 4 } else { 3 });
3345
3346         if deliver_bs_raa {
3347                 let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
3348                 match nodes_2_event {
3349                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3350                                 assert_eq!(nodes[2].node.get_our_node_id(), *node_id);
3351                                 assert_eq!(update_add_htlcs.len(), 1);
3352                                 assert!(update_fulfill_htlcs.is_empty());
3353                                 assert!(update_fail_htlcs.is_empty());
3354                                 assert!(update_fail_malformed_htlcs.is_empty());
3355                         },
3356                         _ => panic!("Unexpected event"),
3357                 }
3358         }
3359
3360         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
3361         match nodes_2_event {
3362                 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { msg: Some(msgs::ErrorMessage { channel_id, ref data }) }, node_id: _ } => {
3363                         assert_eq!(channel_id, chan_2.2);
3364                         assert_eq!(data.as_str(), "Channel closed because commitment or closing transaction was confirmed on chain.");
3365                 },
3366                 _ => panic!("Unexpected event"),
3367         }
3368
3369         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
3370         match nodes_0_event {
3371                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3372                         assert!(update_add_htlcs.is_empty());
3373                         assert_eq!(update_fail_htlcs.len(), 3);
3374                         assert!(update_fulfill_htlcs.is_empty());
3375                         assert!(update_fail_malformed_htlcs.is_empty());
3376                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3377
3378                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3379                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[1]);
3380                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[2]);
3381
3382                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3383
3384                         let events = nodes[0].node.get_and_clear_pending_events();
3385                         assert_eq!(events.len(), 6);
3386                         match events[0] {
3387                                 Event::PaymentPathFailed { ref payment_hash, ref failure, .. } => {
3388                                         assert!(failed_htlcs.insert(payment_hash.0));
3389                                         // If we delivered B's RAA we got an unknown preimage error, not something
3390                                         // that we should update our routing table for.
3391                                         if !deliver_bs_raa {
3392                                                 if let PathFailure::OnPath { network_update: Some(_) } = failure { } else { panic!("Unexpected path failure") }
3393                                         }
3394                                 },
3395                                 _ => panic!("Unexpected event"),
3396                         }
3397                         match events[1] {
3398                                 Event::PaymentFailed { ref payment_hash, .. } => {
3399                                         assert_eq!(*payment_hash, first_payment_hash);
3400                                 },
3401                                 _ => panic!("Unexpected event"),
3402                         }
3403                         match events[2] {
3404                                 Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
3405                                         assert!(failed_htlcs.insert(payment_hash.0));
3406                                 },
3407                                 _ => panic!("Unexpected event"),
3408                         }
3409                         match events[3] {
3410                                 Event::PaymentFailed { ref payment_hash, .. } => {
3411                                         assert_eq!(*payment_hash, second_payment_hash);
3412                                 },
3413                                 _ => panic!("Unexpected event"),
3414                         }
3415                         match events[4] {
3416                                 Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
3417                                         assert!(failed_htlcs.insert(payment_hash.0));
3418                                 },
3419                                 _ => panic!("Unexpected event"),
3420                         }
3421                         match events[5] {
3422                                 Event::PaymentFailed { ref payment_hash, .. } => {
3423                                         assert_eq!(*payment_hash, third_payment_hash);
3424                                 },
3425                                 _ => panic!("Unexpected event"),
3426                         }
3427                 },
3428                 _ => panic!("Unexpected event"),
3429         }
3430
3431         // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
3432         match events[0] {
3433                 MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
3434                 _ => panic!("Unexpected event"),
3435         }
3436
3437         assert!(failed_htlcs.contains(&first_payment_hash.0));
3438         assert!(failed_htlcs.contains(&second_payment_hash.0));
3439         assert!(failed_htlcs.contains(&third_payment_hash.0));
3440 }
3441
3442 #[test]
3443 fn test_commitment_revoked_fail_backward_exhaustive_a() {
3444         do_test_commitment_revoked_fail_backward_exhaustive(false, true, false);
3445         do_test_commitment_revoked_fail_backward_exhaustive(true, true, false);
3446         do_test_commitment_revoked_fail_backward_exhaustive(false, false, false);
3447         do_test_commitment_revoked_fail_backward_exhaustive(true, false, false);
3448 }
3449
3450 #[test]
3451 fn test_commitment_revoked_fail_backward_exhaustive_b() {
3452         do_test_commitment_revoked_fail_backward_exhaustive(false, true, true);
3453         do_test_commitment_revoked_fail_backward_exhaustive(true, true, true);
3454         do_test_commitment_revoked_fail_backward_exhaustive(false, false, true);
3455         do_test_commitment_revoked_fail_backward_exhaustive(true, false, true);
3456 }
3457
3458 #[test]
3459 fn fail_backward_pending_htlc_upon_channel_failure() {
3460         let chanmon_cfgs = create_chanmon_cfgs(2);
3461         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3462         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3463         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3464         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
3465
3466         // Alice -> Bob: Route a payment but without Bob sending revoke_and_ack.
3467         {
3468                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000);
3469                 nodes[0].node.send_payment_with_route(&route, payment_hash, RecipientOnionFields::secret_only(payment_secret),
3470                         PaymentId(payment_hash.0)).unwrap();
3471                 check_added_monitors!(nodes[0], 1);
3472
3473                 let payment_event = {
3474                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3475                         assert_eq!(events.len(), 1);
3476                         SendEvent::from_event(events.remove(0))
3477                 };
3478                 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
3479                 assert_eq!(payment_event.msgs.len(), 1);
3480         }
3481
3482         // Alice -> Bob: Route another payment but now Alice waits for Bob's earlier revoke_and_ack.
3483         let (route, failed_payment_hash, _, failed_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000);
3484         {
3485                 nodes[0].node.send_payment_with_route(&route, failed_payment_hash,
3486                         RecipientOnionFields::secret_only(failed_payment_secret), PaymentId(failed_payment_hash.0)).unwrap();
3487                 check_added_monitors!(nodes[0], 0);
3488
3489                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3490         }
3491
3492         // Alice <- Bob: Send a malformed update_add_htlc so Alice fails the channel.
3493         {
3494                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 50_000);
3495
3496                 let secp_ctx = Secp256k1::new();
3497                 let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
3498                 let current_height = nodes[1].node.best_block.read().unwrap().height() + 1;
3499                 let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(
3500                         &route.paths[0], 50_000, RecipientOnionFields::secret_only(payment_secret), current_height, &None).unwrap();
3501                 let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
3502                 let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
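                // Hand-rolling a well-formed onion ensures the only protocol violation in the
                // update_add_htlc below is the 0-msat amount, so that is what Alice rejects
                // rather than a malformed onion.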
3503
3504                 // Send a 0-msat update_add_htlc to fail the channel.
3505                 let update_add_htlc = msgs::UpdateAddHTLC {
3506                         channel_id: chan.2,
3507                         htlc_id: 0,
3508                         amount_msat: 0,
3509                         payment_hash,
3510                         cltv_expiry,
3511                         onion_routing_packet,
3512                         skimmed_fee_msat: None,
3513                 };
3514                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
3515         }
3516         let events = nodes[0].node.get_and_clear_pending_events();
3517         assert_eq!(events.len(), 3);
3518         // Check that Alice fails backward the pending HTLC from the second payment.
3519         match events[0] {
3520                 Event::PaymentPathFailed { payment_hash, .. } => {
3521                         assert_eq!(payment_hash, failed_payment_hash);
3522                 },
3523                 _ => panic!("Unexpected event"),
3524         }
3525         match events[1] {
3526                 Event::PaymentFailed { payment_hash, .. } => {
3527                         assert_eq!(payment_hash, failed_payment_hash);
3528                 },
3529                 _ => panic!("Unexpected event"),
3530         }
3531         match events[2] {
3532                 Event::ChannelClosed { reason: ClosureReason::ProcessingError { ref err }, .. } => {
3533                         assert_eq!(err, "Remote side tried to send a 0-msat HTLC");
3534                 },
3535                 _ => panic!("Unexpected event {:?}", events[1]),
3536         }
3537         check_closed_broadcast!(nodes[0], true);
3538         check_added_monitors!(nodes[0], 1);
3539 }
3540
3541 #[test]
3542 fn test_htlc_ignore_latest_remote_commitment() {
3543         // Test that HTLC transactions spending the latest remote commitment transaction are simply
3544         // ignored if we cannot claim them. This originally tickled an invalid unwrap().
3545         let chanmon_cfgs = create_chanmon_cfgs(2);
3546         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3547         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3548         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3549         if *nodes[1].connect_style.borrow() == ConnectStyle::FullBlockViaListen {
3550                 // We rely on the ability to connect a block redundantly, which isn't allowed via
3551                 // `chain::Listen`, so we never run the test if we randomly get assigned that
3552                 // connect_style.
3553                 return;
3554         }
3555         create_announced_chan_between_nodes(&nodes, 0, 1);
3556
3557         route_payment(&nodes[0], &[&nodes[1]], 10000000);
3558         nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
3559         connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
3560         check_closed_broadcast!(nodes[0], true);
3561         check_added_monitors!(nodes[0], 1);
3562         check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
3563
3564         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
3565         assert_eq!(node_txn.len(), 3);
3566         assert_eq!(node_txn[0].txid(), node_txn[1].txid());
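             // The commitment transaction shows up twice, once from the ChannelManager force-close
             // and once from the ChannelMonitor, which is why the first two txids match; the
             // remaining transaction is the HTLC-timeout claim generated by the blocks connected above.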
3567
3568         let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[1].clone()]);
3569         connect_block(&nodes[1], &block);
3570         check_closed_broadcast!(nodes[1], true);
3571         check_added_monitors!(nodes[1], 1);
3572         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
3573
3574         // Duplicate the connect_block call since this may happen due to other listeners
3575         // registering new transactions
3576         connect_block(&nodes[1], &block);
3577 }
3578
3579 #[test]
3580 fn test_force_close_fail_back() {
3581         // Check which HTLCs are failed-backwards on channel force-closure
3582         let chanmon_cfgs = create_chanmon_cfgs(3);
3583         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3584         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3585         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3586         create_announced_chan_between_nodes(&nodes, 0, 1);
3587         create_announced_chan_between_nodes(&nodes, 1, 2);
3588
3589         let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
3590
3591         let mut payment_event = {
3592                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
3593                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
3594                 check_added_monitors!(nodes[0], 1);
3595
3596                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3597                 assert_eq!(events.len(), 1);
3598                 SendEvent::from_event(events.remove(0))
3599         };
3600
3601         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3602         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
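             // commitment_signed_dance! runs the full two-way handshake for this commitment update:
             // roughly, nodes[1] responds with revoke_and_ack + commitment_signed and nodes[0]
             // replies with its own revoke_and_ack, leaving both sides on the new commitment.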
3603
3604         expect_pending_htlcs_forwardable!(nodes[1]);
3605
3606         let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
3607         assert_eq!(events_2.len(), 1);
3608         payment_event = SendEvent::from_event(events_2.remove(0));
3609         assert_eq!(payment_event.msgs.len(), 1);
3610
3611         check_added_monitors!(nodes[1], 1);
3612         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
3613         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
3614         check_added_monitors!(nodes[2], 1);
3615         let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3616
3617         // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
3618         // state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
3619         // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
3620
3621         nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
3622         check_closed_broadcast!(nodes[2], true);
3623         check_added_monitors!(nodes[2], 1);
3624         check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
3625         let tx = {
3626                 let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
3627                 // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
3628                 // have a use for it unless nodes[2] learns the preimage somehow; otherwise the funds
3629                 // will go back to nodes[1] upon timeout.
3630                 assert_eq!(node_txn.len(), 1);
3631                 node_txn.remove(0)
3632         };
3633
3634         mine_transaction(&nodes[1], &tx);
3635
3636         // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
3637         check_closed_broadcast!(nodes[1], true);
3638         check_added_monitors!(nodes[1], 1);
3639         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
3640
3641         // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
3642         {
3643                 get_monitor!(nodes[2], payment_event.commitment_msg.channel_id)
3644                         .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger);
3645         }
3646         mine_transaction(&nodes[2], &tx);
3647         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
3648         assert_eq!(node_txn.len(), 1);
3649         assert_eq!(node_txn[0].input.len(), 1);
3650         assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
3651         assert_eq!(node_txn[0].lock_time, LockTime::ZERO); // Must be an HTLC-Success
3652         assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
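             // Per BOLT 3, a (non-anchors) HTLC-Success spend has a 5-element witness: an empty
             // element for the CHECKMULTISIG bug, both HTLC signatures, the payment preimage, and
             // the HTLC witness script. HTLC-Timeout transactions instead set lock_time to the
             // HTLC's cltv_expiry, hence the LockTime::ZERO check above.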
3653
3654         check_spends!(node_txn[0], tx);
3655 }
3656
3657 #[test]
3658 fn test_dup_events_on_peer_disconnect() {
3659         // Test that if we receive a duplicative update_fulfill_htlc message after a reconnect we do
3660         // not generate a corresponding duplicative PaymentSent event. This was not always the case:
3661         // we used to generate the event immediately upon receipt of the payment preimage in the
3662         // update_fulfill_htlc message.
3663
3664         let chanmon_cfgs = create_chanmon_cfgs(2);
3665         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3666         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3667         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3668         create_announced_chan_between_nodes(&nodes, 0, 1);
3669
3670         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
3671
3672         nodes[1].node.claim_funds(payment_preimage);
3673         expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
3674         check_added_monitors!(nodes[1], 1);
3675         let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3676         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
3677         expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
3678
3679         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3680         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3681
3682         let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3683         reconnect_args.pending_htlc_claims.0 = 1;
3684         reconnect_nodes(reconnect_args);
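             // The reconnect above replays nodes[1]'s update_fulfill_htlc. Since the PaymentSent
             // event was already generated before the disconnect, only the deferred
             // PaymentPathSuccessful should appear now; a duplicate PaymentSent would be a bug.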
3685         expect_payment_path_successful!(nodes[0]);
3686 }
3687
3688 #[test]
3689 fn test_peer_disconnected_before_funding_broadcasted() {
3690         // Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
3691         // before the funding transaction has been broadcasted.
3692         let chanmon_cfgs = create_chanmon_cfgs(2);
3693         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3694         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3695         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3696
3697         // Open a channel between `nodes[0]` and `nodes[1]`, for which the funding transaction is never
3698         // broadcasted, even though it's created by `nodes[0]`.
3699         let expected_temporary_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
3700         let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
3701         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
3702         let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
3703         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
3704
3705         let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
3706         assert_eq!(temporary_channel_id, expected_temporary_channel_id);
3707
3708         assert!(nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
3709
3710         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
3711         assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id);
3712
3713         // Even though the funding transaction is created by `nodes[0]`, the `FundingCreated` msg is
3714         // never sent to `nodes[1]`, and therefore the tx is never signed by either party nor
3715         // broadcasted.
3716         {
3717                 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
3718         }
3719
3720         // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` when the peers are
3721         // disconnected before the funding transaction was broadcasted.
3722         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3723         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3724
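             // nodes[0] created the funding transaction, so in addition to ChannelClosed it should
             // also see a DiscardFunding event (hence the event count of 2 and the `true` flag
             // below), while nodes[1] only sees the ChannelClosed event.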
3725         check_closed_event!(&nodes[0], 2, ClosureReason::DisconnectedPeer, true,
3726                 [nodes[1].node.get_our_node_id()], 1000000);
3727         check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false,
3728                 [nodes[0].node.get_our_node_id()], 1000000);
3729 }
3730
3731 #[test]
3732 fn test_simple_peer_disconnect() {
3733         // Test that we can reconnect when there are no lost messages
3734         let chanmon_cfgs = create_chanmon_cfgs(3);
3735         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3736         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3737         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3738         create_announced_chan_between_nodes(&nodes, 0, 1);
3739         create_announced_chan_between_nodes(&nodes, 1, 2);
3740
3741         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3742         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3743         let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3744         reconnect_args.send_channel_ready = (true, true);
3745         reconnect_nodes(reconnect_args);
3746
3747         let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3748         let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3749         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
3750         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);
3751
3752         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3753         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3754         reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3755
3756         let (payment_preimage_3, payment_hash_3, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
3757         let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3758         let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3759         let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3760
3761         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3762         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3763
3764         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
3765         fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);
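             // The claim and fail above deliberately stop short of completing the final hop back to
             // nodes[0] while the peers are disconnected; the reconnect below replays the remaining
             // fulfill and fail, after which the resulting events are checked.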
3766
3767         let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3768         reconnect_args.pending_cell_htlc_fails.0 = 1;
3769         reconnect_args.pending_cell_htlc_claims.0 = 1;
3770         reconnect_nodes(reconnect_args);
3771         {
3772                 let events = nodes[0].node.get_and_clear_pending_events();
3773                 assert_eq!(events.len(), 4);
3774                 match events[0] {
3775                         Event::PaymentSent { payment_preimage, payment_hash, .. } => {
3776                                 assert_eq!(payment_preimage, payment_preimage_3);
3777                                 assert_eq!(payment_hash, payment_hash_3);
3778                         },
3779                         _ => panic!("Unexpected event"),
3780                 }
3781                 match events[1] {
3782                         Event::PaymentPathSuccessful { .. } => {},
3783                         _ => panic!("Unexpected event"),
3784                 }
3785                 match events[2] {
3786                         Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } => {
3787                                 assert_eq!(payment_hash, payment_hash_5);
3788                                 assert!(payment_failed_permanently);
3789                         },
3790                         _ => panic!("Unexpected event"),
3791                 }
3792                 match events[3] {
3793                         Event::PaymentFailed { payment_hash, .. } => {
3794                                 assert_eq!(payment_hash, payment_hash_5);
3795                         },
3796                         _ => panic!("Unexpected event"),
3797                 }
3798         }
3799         check_added_monitors(&nodes[0], 1);
3800
3801         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
3802         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
3803 }
3804
3805 fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken_lnd: bool) {
3806         // Test that we can reconnect when in-flight HTLC updates get dropped
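             // `messages_delivered` selects how far the update_add_htlc/commitment_signed/RAA
             // exchange gets before the disconnect, roughly:
             //   0: even channel_ready is withheld
             //   1: the update_add_htlc + commitment_signed are dropped
             //   2: update_add_htlc delivered, commitment_signed dropped
             //   3: commitment_signed delivered, nodes[1]'s RAA + commitment_signed dropped
             //   4: nodes[1]'s RAA delivered, its commitment_signed dropped
             //   5: nodes[1]'s commitment_signed delivered, nodes[0]'s final RAA dropped
             //   6: everything delivered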
3807         let chanmon_cfgs = create_chanmon_cfgs(2);
3808         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3809         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3810         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3811
3812         let mut as_channel_ready = None;
3813         let channel_id = if messages_delivered == 0 {
3814                 let (channel_ready, chan_id, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001);
3815                 as_channel_ready = Some(channel_ready);
3816                 // nodes[1] doesn't receive the channel_ready message (it'll be re-sent on reconnect)
3817                 // Note that we store it so that if we're running with `simulate_broken_lnd` we can deliver
3818                 // it before the channel_reestablish message.
3819                 chan_id
3820         } else {
3821                 create_announced_chan_between_nodes(&nodes, 0, 1).2
3822         };
3823
3824         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000);
3825
3826         let payment_event = {
3827                 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
3828                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
3829                 check_added_monitors!(nodes[0], 1);
3830
3831                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3832                 assert_eq!(events.len(), 1);
3833                 SendEvent::from_event(events.remove(0))
3834         };
3835         assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);
3836
3837         if messages_delivered < 2 {
3838                 // Drop the payment_event messages, and let them get re-generated in reconnect_nodes!
3839         } else {
3840                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3841                 if messages_delivered >= 3 {
3842                         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
3843                         check_added_monitors!(nodes[1], 1);
3844                         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3845
3846                         if messages_delivered >= 4 {
3847                                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
3848                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3849                                 check_added_monitors!(nodes[0], 1);
3850
3851                                 if messages_delivered >= 5 {
3852                                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
3853                                         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
3854                                         // No commitment_signed so get_event_msg's assert(len == 1) passes
3855                                         check_added_monitors!(nodes[0], 1);
3856
3857                                         if messages_delivered >= 6 {
3858                                                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
3859                                                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3860                                                 check_added_monitors!(nodes[1], 1);
3861                                         }
3862                                 }
3863                         }
3864                 }
3865         }
3866
3867         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3868         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3869         if messages_delivered < 3 {
3870                 if simulate_broken_lnd {
3871                         // lnd has a long-standing bug where they send a channel_ready prior to a
3872                         // channel_reestablish if you reconnect prior to channel_ready time.
3873                         //
3874                         // Here we simulate that behavior, delivering a channel_ready immediately on
3875                         // reconnect. Note that we don't bother skipping the now-duplicate channel_ready sent
3876                         // in `reconnect_nodes` but we currently don't fail based on that.
3877                         //
3878                         // See-also <https://github.com/lightningnetwork/lnd/issues/4006>
3879                         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready.as_ref().unwrap().0);
3880                 }
3881                 // Even if the channel_ready messages get exchanged, as long as nothing further was
3882                 // received on either side, both sides will need to resend them.
3883                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3884                 reconnect_args.send_channel_ready = (true, true);
3885                 reconnect_args.pending_htlc_adds.1 = 1;
3886                 reconnect_nodes(reconnect_args);
3887         } else if messages_delivered == 3 {
3888                 // nodes[0] still wants its RAA + commitment_signed
3889                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3890                 reconnect_args.pending_responding_commitment_signed.0 = true;
3891                 reconnect_args.pending_raa.0 = true;
3892                 reconnect_nodes(reconnect_args);
3893         } else if messages_delivered == 4 {
3894                 // nodes[0] still wants its commitment_signed
3895                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3896                 reconnect_args.pending_responding_commitment_signed.0 = true;
3897                 reconnect_nodes(reconnect_args);
3898         } else if messages_delivered == 5 {
3899                 // nodes[1] still wants its final RAA
3900                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3901                 reconnect_args.pending_raa.1 = true;
3902                 reconnect_nodes(reconnect_args);
3903         } else if messages_delivered == 6 {
3904                 // Everything was delivered...
3905                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3906         }
3907
3908         let events_1 = nodes[1].node.get_and_clear_pending_events();
3909         if messages_delivered == 0 {
3910                 assert_eq!(events_1.len(), 2);
3911                 match events_1[0] {
3912                         Event::ChannelReady { .. } => { },
3913                         _ => panic!("Unexpected event"),
3914                 };
3915                 match events_1[1] {
3916                         Event::PendingHTLCsForwardable { .. } => { },
3917                         _ => panic!("Unexpected event"),
3918                 };
3919         } else {
3920                 assert_eq!(events_1.len(), 1);
3921                 match events_1[0] {
3922                         Event::PendingHTLCsForwardable { .. } => { },
3923                         _ => panic!("Unexpected event"),
3924                 };
3925         }
3926
3927         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3928         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3929         reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3930
3931         nodes[1].node.process_pending_htlc_forwards();
3932
3933         let events_2 = nodes[1].node.get_and_clear_pending_events();
3934         assert_eq!(events_2.len(), 1);
3935         match events_2[0] {
3936                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
3937                         assert_eq!(payment_hash_1, *payment_hash);
3938                         assert_eq!(amount_msat, 1_000_000);
3939                         assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
3940                         assert_eq!(via_channel_id, Some(channel_id));
3941                         match &purpose {
3942                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
3943                                         assert!(payment_preimage.is_none());
3944                                         assert_eq!(payment_secret_1, *payment_secret);
3945                                 },
3946                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
3947                         }
3948                 },
3949                 _ => panic!("Unexpected event"),
3950         }
3951
3952         nodes[1].node.claim_funds(payment_preimage_1);
3953         check_added_monitors!(nodes[1], 1);
3954         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
3955
3956         let events_3 = nodes[1].node.get_and_clear_pending_msg_events();
3957         assert_eq!(events_3.len(), 1);
3958         let (update_fulfill_htlc, commitment_signed) = match events_3[0] {
3959                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
3960                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
3961                         assert!(updates.update_add_htlcs.is_empty());
3962                         assert!(updates.update_fail_htlcs.is_empty());
3963                         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
3964                         assert!(updates.update_fail_malformed_htlcs.is_empty());
3965                         assert!(updates.update_fee.is_none());
3966                         (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
3967                 },
3968                 _ => panic!("Unexpected event"),
3969         };
3970
3971         if messages_delivered >= 1 {
3972                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc);
3973
3974                 let events_4 = nodes[0].node.get_and_clear_pending_events();
3975                 assert_eq!(events_4.len(), 1);
3976                 match events_4[0] {
3977                         Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
3978                                 assert_eq!(payment_preimage_1, *payment_preimage);
3979                                 assert_eq!(payment_hash_1, *payment_hash);
3980                         },
3981                         _ => panic!("Unexpected event"),
3982                 }
3983
3984                 if messages_delivered >= 2 {
3985                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
3986                         check_added_monitors!(nodes[0], 1);
3987                         let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
3988
3989                         if messages_delivered >= 3 {
3990                                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
3991                                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3992                                 check_added_monitors!(nodes[1], 1);
3993
3994                                 if messages_delivered >= 4 {
3995                                         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed);
3996                                         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
3997                                         // No commitment_signed so get_event_msg's assert(len == 1) passes
3998                                         check_added_monitors!(nodes[1], 1);
3999
4000                                         if messages_delivered >= 5 {
4001                                                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
4002                                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4003                                                 check_added_monitors!(nodes[0], 1);
4004                                         }
4005                                 }
4006                         }
4007                 }
4008         }
4009
4010         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4011         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4012         if messages_delivered < 2 {
4013                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4014                 reconnect_args.pending_htlc_claims.0 = 1;
4015                 reconnect_nodes(reconnect_args);
4016                 if messages_delivered < 1 {
4017                         expect_payment_sent!(nodes[0], payment_preimage_1);
4018                 } else {
4019                         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4020                 }
4021         } else if messages_delivered == 2 {
4022                 // nodes[1] still wants its RAA + commitment_signed
4023                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4024                 reconnect_args.pending_responding_commitment_signed.1 = true;
4025                 reconnect_args.pending_raa.1 = true;
4026                 reconnect_nodes(reconnect_args);
4027         } else if messages_delivered == 3 {
4028                 // nodes[1] still wants its commitment_signed
4029                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4030                 reconnect_args.pending_responding_commitment_signed.1 = true;
4031                 reconnect_nodes(reconnect_args);
4032         } else if messages_delivered == 4 {
4033                 // nodes[0] still wants its final RAA
4034                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4035                 reconnect_args.pending_raa.0 = true;
4036                 reconnect_nodes(reconnect_args);
4037         } else if messages_delivered == 5 {
4038                 // Everything was delivered...
4039                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
4040         }
4041
4042         if messages_delivered == 1 || messages_delivered == 2 {
4043                 expect_payment_path_successful!(nodes[0]);
4044         }
4045         if messages_delivered <= 5 {
4046                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4047                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4048         }
4049         reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
4050
4051         if messages_delivered > 2 {
4052                 expect_payment_path_successful!(nodes[0]);
4053         }
4054
4055         // Channel should still work fine...
4056         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
4057         let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
4058         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
4059 }
4060
4061 #[test]
4062 fn test_drop_messages_peer_disconnect_a() {
4063         do_test_drop_messages_peer_disconnect(0, true);
4064         do_test_drop_messages_peer_disconnect(0, false);
4065         do_test_drop_messages_peer_disconnect(1, false);
4066         do_test_drop_messages_peer_disconnect(2, false);
4067 }
4068
4069 #[test]
4070 fn test_drop_messages_peer_disconnect_b() {
4071         do_test_drop_messages_peer_disconnect(3, false);
4072         do_test_drop_messages_peer_disconnect(4, false);
4073         do_test_drop_messages_peer_disconnect(5, false);
4074         do_test_drop_messages_peer_disconnect(6, false);
4075 }
4076
4077 #[test]
4078 fn test_channel_ready_without_best_block_updated() {
4079         // Previously, if we were offline when a funding transaction was locked in, and then we came
4080         // back online, calling best_block_updated once followed by transactions_confirmed, we'd not
4081         // generate a channel_ready until a later best_block_updated. This tests that we generate the
4082         // channel_ready immediately instead.
4083         let chanmon_cfgs = create_chanmon_cfgs(2);
4084         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4085         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4086         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4087         *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
4088
4089         let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
4090
4091         let conf_height = nodes[0].best_block_info().1 + 1;
4092         connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
4093         let block_txn = [funding_tx];
4094         let conf_txn: Vec<_> = block_txn.iter().enumerate().collect();
4095         let conf_block_header = nodes[0].get_block_header(conf_height);
4096         nodes[0].node.transactions_confirmed(&conf_block_header, &conf_txn[..], conf_height);
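             // No further best_block_updated call is needed here: connect_blocks already advanced
             // the best block, so transactions_confirmed alone suffices for the funding to be
             // considered confirmed and to trigger the channel_ready checked below.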
4097
4098         // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
4099         let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
4100         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
4101 }
4102
4103 #[test]
4104 fn test_channel_monitor_skipping_block_when_channel_manager_is_leading() {
4105         let chanmon_cfgs = create_chanmon_cfgs(2);
4106         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4107         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4108         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4109
4110         // Let channel_manager get ahead of chain_monitor by 1 block.
4111         // This emulates a race condition where a newly added channel_monitor skips processing one block,
4112         // in the case where the client calls block_connected on channel_manager first and then on chain_monitor.
4113         let height_1 = nodes[0].best_block_info().1 + 1;
4114         let mut block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
4115
4116         nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
4117         nodes[0].node.block_connected(&block_1, height_1);
4118
4119         // Create channel, and it gets added to chain_monitor in funding_created.
4120         let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
4121
4122         // Now, the newly added channel_monitor in chain_monitor hasn't processed block_1,
4123         // but its best_block is block_1, since that was populated by channel_manager, and channel_manager
4124         // was running ahead of chain_monitor at the time of funding_created.
4125         // Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
4126         // Hence, this channel's channel_monitor skipped block_1 and directly tries to process subsequent blocks.
4127         confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
4128         connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
4129
4130         // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
4131         let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
4132         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
4133 }
4134
4135 #[test]
4136 fn test_channel_monitor_skipping_block_when_channel_manager_is_lagging() {
4137         let chanmon_cfgs = create_chanmon_cfgs(2);
4138         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4139         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4140         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4141
4142         // Let chain_monitor get ahead of channel_manager by 1 block.
4143         // This emulates a race condition where a newly added channel_monitor skips processing one block,
4144         // in the case where the client calls block_connected on chain_monitor first and then on channel_manager.
4145         let height_1 = nodes[0].best_block_info().1 + 1;
4146         let mut block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
4147
4148         nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
4149         nodes[0].chain_monitor.chain_monitor.block_connected(&block_1, height_1);
4150
4151         // Create channel, and it gets added to chain_monitor in funding_created.
4152         let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
4153
4154         // channel_manager can't really skip block_1; it should get it eventually.
4155         nodes[0].node.block_connected(&block_1, height_1);
4156
4157         // Now, the newly added channel_monitor in chain_monitor hasn't processed block_1; its best_block is
4158         // the block before block_1, since that was populated by channel_manager, and channel_manager was
4159         // running behind at the time of funding_created.
4160         // Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
4161         // Hence, this channel's channel_monitor skipped block_1 and directly tries to process subsequent blocks.
4162         confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
4163         connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
4164
4165         // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
4166         let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
4167         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
4168 }
4169
4170 #[test]
4171 fn test_drop_messages_peer_disconnect_dual_htlc() {
4172         // Test that we can handle reconnecting when both sides of a channel have pending
4173         // commitment_updates when we disconnect.
4174         let chanmon_cfgs = create_chanmon_cfgs(2);
4175         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4176         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4177         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4178         create_announced_chan_between_nodes(&nodes, 0, 1);
4179
4180         let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
4181
4182         // Now try to send a second payment which will fail to send
4183         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
4184         nodes[0].node.send_payment_with_route(&route, payment_hash_2,
4185                 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
4186         check_added_monitors!(nodes[0], 1);
4187
4188         let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
4189         assert_eq!(events_1.len(), 1);
4190         match events_1[0] {
4191                 MessageSendEvent::UpdateHTLCs { .. } => {},
4192                 _ => panic!("Unexpected event"),
4193         }
4194
4195         nodes[1].node.claim_funds(payment_preimage_1);
4196         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
4197         check_added_monitors!(nodes[1], 1);
4198
4199         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
4200         assert_eq!(events_2.len(), 1);
4201         match events_2[0] {
4202                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
4203                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
4204                         assert!(update_add_htlcs.is_empty());
4205                         assert_eq!(update_fulfill_htlcs.len(), 1);
4206                         assert!(update_fail_htlcs.is_empty());
4207                         assert!(update_fail_malformed_htlcs.is_empty());
4208                         assert!(update_fee.is_none());
4209
4210                         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
4211                         let events_3 = nodes[0].node.get_and_clear_pending_events();
4212                         assert_eq!(events_3.len(), 1);
4213                         match events_3[0] {
4214                                 Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
4215                                         assert_eq!(*payment_preimage, payment_preimage_1);
4216                                         assert_eq!(*payment_hash, payment_hash_1);
4217                                 },
4218                                 _ => panic!("Unexpected event"),
4219                         }
4220
4221                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
4222                         let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4223                         // No commitment_signed so get_event_msg's assert(len == 1) passes
4224                         check_added_monitors!(nodes[0], 1);
4225                 },
4226                 _ => panic!("Unexpected event"),
4227         }
4228
4229         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4230         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4231
4232         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
4233                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
4234         }, true).unwrap();
4235         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
4236         assert_eq!(reestablish_1.len(), 1);
4237         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
4238                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
4239         }, false).unwrap();
4240         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
4241         assert_eq!(reestablish_2.len(), 1);
4242
4243         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
4244         let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
4245         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
4246         let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
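             // handle_chan_reestablish_msgs! returns a tuple of the messages each side queued in
             // response: (channel_ready, revoke_and_ack, commitment update, RAA/commitment order),
             // which is what the .0/.1/.2/.3 accesses below pick apart.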
4247
4248         assert!(as_resp.0.is_none());
4249         assert!(bs_resp.0.is_none());
4250
4251         assert!(bs_resp.1.is_none());
4252         assert!(bs_resp.2.is_none());
4253
4254         assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
4255
4256         assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1);
4257         assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty());
4258         assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
4259         assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
4260         assert!(as_resp.2.as_ref().unwrap().update_fee.is_none());
4261         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]);
4262         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed);
4263         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4264         // No commitment_signed so get_event_msg's assert(len == 1) passes
4265         check_added_monitors!(nodes[1], 1);
4266
4267         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap());
4268         let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4269         assert!(bs_second_commitment_signed.update_add_htlcs.is_empty());
4270         assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty());
4271         assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty());
4272         assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty());
4273         assert!(bs_second_commitment_signed.update_fee.is_none());
4274         check_added_monitors!(nodes[1], 1);
4275
4276         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
4277         let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4278         assert!(as_commitment_signed.update_add_htlcs.is_empty());
4279         assert!(as_commitment_signed.update_fulfill_htlcs.is_empty());
4280         assert!(as_commitment_signed.update_fail_htlcs.is_empty());
4281         assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty());
4282         assert!(as_commitment_signed.update_fee.is_none());
4283         check_added_monitors!(nodes[0], 1);
4284
4285         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed);
4286         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4287         // No commitment_signed so get_event_msg's assert(len == 1) passes
4288         check_added_monitors!(nodes[0], 1);
4289
4290         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed);
4291         let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4292         // No commitment_signed so get_event_msg's assert(len == 1) passes
4293         check_added_monitors!(nodes[1], 1);
4294
4295         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
4296         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4297         check_added_monitors!(nodes[1], 1);
4298
4299         expect_pending_htlcs_forwardable!(nodes[1]);
4300
4301         let events_5 = nodes[1].node.get_and_clear_pending_events();
4302         assert_eq!(events_5.len(), 1);
4303         match events_5[0] {
4304                 Event::PaymentClaimable { ref payment_hash, ref purpose, .. } => {
4305                         assert_eq!(payment_hash_2, *payment_hash);
4306                         match &purpose {
4307                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
4308                                         assert!(payment_preimage.is_none());
4309                                         assert_eq!(payment_secret_2, *payment_secret);
4310                                 },
4311                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
4312                         }
4313                 },
4314                 _ => panic!("Unexpected event"),
4315         }
4316
4317         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
4318         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4319         check_added_monitors!(nodes[0], 1);
4320
4321         expect_payment_path_successful!(nodes[0]);
4322         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
4323 }
4324
4325 fn do_test_htlc_timeout(send_partial_mpp: bool) {
4326         // If the user fails to claim/fail an HTLC within the HTLC CLTV timeout we fail it for them
4327         // to avoid our counterparty failing the channel.
4328         let chanmon_cfgs = create_chanmon_cfgs(2);
4329         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4330         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4331         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4332
4333         create_announced_chan_between_nodes(&nodes, 0, 1);
4334
4335         let our_payment_hash = if send_partial_mpp {
4336                 let (route, our_payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000);
4337                 // Use the utility function send_payment_along_path to send the payment with MPP data which
4338                 // indicates there are more HTLCs coming.
4339                 let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
4340                 let payment_id = PaymentId([42; 32]);
4341                 let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
4342                         RecipientOnionFields::secret_only(payment_secret), payment_id, &route).unwrap();
4343                 nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash,
4344                         RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id,
4345                         &None, session_privs[0]).unwrap();
4346                 check_added_monitors!(nodes[0], 1);
4347                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
4348                 assert_eq!(events.len(), 1);
4349                 // Now do the relevant commitment_signed/RAA dances along the path, noting that the final
4350                 // hop should *not* yet generate any PaymentClaimable event(s).
4351                 pass_along_path(&nodes[0], &[&nodes[1]], 100000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
4352                 our_payment_hash
4353         } else {
4354                 route_payment(&nodes[0], &[&nodes[1]], 100000).1
4355         };
4356
4357         let mut block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
4358         connect_block(&nodes[0], &block);
4359         connect_block(&nodes[1], &block);
4360         let block_count = TEST_FINAL_CLTV + CHAN_CONFIRM_DEPTH + 2 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS;
4361         for _ in CHAN_CONFIRM_DEPTH + 2..block_count {
4362                 block.header.prev_blockhash = block.block_hash();
4363                 connect_block(&nodes[0], &block);
4364                 connect_block(&nodes[1], &block);
4365         }
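             // The HTLC is now within CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS of its expiry,
             // so nodes[1] fails it back rather than risk its counterparty force-closing the channel.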
4366
4367         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
4368
4369         check_added_monitors!(nodes[1], 1);
4370         let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4371         assert!(htlc_timeout_updates.update_add_htlcs.is_empty());
4372         assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1);
4373         assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty());
4374         assert!(htlc_timeout_updates.update_fee.is_none());
4375
4376         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]);
4377         commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false);
4378         // 100_000 msat as u64, followed by the height at which we failed back above
4379         let mut expected_failure_data = (100_000 as u64).to_be_bytes().to_vec();
4380         expected_failure_data.extend_from_slice(&(block_count - 1).to_be_bytes());
4381         expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]);
4382 }
4383
4384 #[test]
4385 fn test_htlc_timeout() {
4386         do_test_htlc_timeout(true);
4387         do_test_htlc_timeout(false);
4388 }
4389
4390 fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
4391         // Tests that HTLCs in the holding cell are timed out after the requisite number of blocks.
4392         let chanmon_cfgs = create_chanmon_cfgs(3);
4393         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
4394         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
4395         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
4396         create_announced_chan_between_nodes(&nodes, 0, 1);
4397         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4398
4399         // Make sure all nodes are at the same starting height
4400         connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
4401         connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
4402         connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
4403
4404         // Route a first payment to get the 1 -> 2 channel in awaiting_raa...
4405         let (route, first_payment_hash, _, first_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
4406         nodes[1].node.send_payment_with_route(&route, first_payment_hash,
4407                 RecipientOnionFields::secret_only(first_payment_secret), PaymentId(first_payment_hash.0)).unwrap();
4408         assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
4409         check_added_monitors!(nodes[1], 1);
4410
4411         // Now attempt to route a second payment, which should be placed in the holding cell
4412         let sending_node = if forwarded_htlc { &nodes[0] } else { &nodes[1] };
4413         let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(sending_node, nodes[2], 100000);
4414         sending_node.node.send_payment_with_route(&route, second_payment_hash,
4415                 RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap();
4416         if forwarded_htlc {
4417                 check_added_monitors!(nodes[0], 1);
4418                 let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
4419                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
4420                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
4421                 expect_pending_htlcs_forwardable!(nodes[1]);
4422         }
4423         check_added_monitors!(nodes[1], 0);
4424
4425         connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS);
4426         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4427         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
4428         connect_blocks(&nodes[1], 1);
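             // That one extra block pushes the holding-cell HTLC past its deadline, so it gets
             // failed (backwards to nodes[0] if it was forwarded, or locally otherwise).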
4429
4430         if forwarded_htlc {
4431                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
4432                 check_added_monitors!(nodes[1], 1);
4433                 let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
4434                 assert_eq!(fail_commit.len(), 1);
4435                 match fail_commit[0] {
4436                         MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, ref commitment_signed, .. }, .. } => {
4437                                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
4438                                 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, true, true);
4439                         },
4440                         _ => unreachable!(),
4441                 }
4442                 expect_payment_failed_with_update!(nodes[0], second_payment_hash, false, chan_2.0.contents.short_channel_id, false);
4443         } else {
4444                 expect_payment_failed!(nodes[1], second_payment_hash, false);
4445         }
4446 }
4447
4448 #[test]
4449 fn test_holding_cell_htlc_add_timeouts() {
4450         do_test_holding_cell_htlc_add_timeouts(false);
4451         do_test_holding_cell_htlc_add_timeouts(true);
4452 }
4453
4454 macro_rules! check_spendable_outputs {
4455         ($node: expr, $keysinterface: expr) => {
4456                 {
4457                         let mut events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events();
4458                         let mut txn = Vec::new();
4459                         let mut all_outputs = Vec::new();
4460                         let secp_ctx = Secp256k1::new();
4461                         for event in events.drain(..) {
4462                                 match event {
4463                                         Event::SpendableOutputs { mut outputs, channel_id: _ } => {
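                                                // Spend each descriptor individually to an OP_RETURN script so each
                                                // resulting transaction can be checked on its own by callers.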
4464                                                 for outp in outputs.drain(..) {
4465                                                         txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx).unwrap());
4466                                                         all_outputs.push(outp);
4467                                                 }
4468                                         },
4469                                         _ => panic!("Unexpected event"),
4470                                 };
4471                         }
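                        // If several descriptors were returned, also attempt one batched transaction
                        // sweeping all of them at once (skipped silently if it fails).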
4472                         if all_outputs.len() > 1 {
4473                                 if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs(&all_outputs.iter().map(|a| a).collect::<Vec<_>>(), Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx) {
4474                                         txn.push(tx);
4475                                 }
4476                         }
4477                         txn
4478                 }
4479         }
4480 }
4481
4482 #[test]
4483 fn test_claim_sizeable_push_msat() {
4484         // Incidentally tests SpendableOutputs event generation due to detection of the to_local output on the commitment tx
4485         let chanmon_cfgs = create_chanmon_cfgs(2);
4486         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4487         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4488         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4489
4490         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
4491         nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
4492         check_closed_broadcast!(nodes[1], true);
4493         check_added_monitors!(nodes[1], 1);
4494         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
4495         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4496         assert_eq!(node_txn.len(), 1);
4497         check_spends!(node_txn[0], chan.3);
4498         assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
4499
4500         mine_transaction(&nodes[1], &node_txn[0]);
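        // nodes[1]'s to_local output is CSV-locked for BREAKDOWN_TIMEOUT blocks (the to_self_delay
        // used in tests); connect the remaining blocks so the lock matures and the monitor can
        // surface the spendable output.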
4501         connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
4502
4503         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4504         assert_eq!(spend_txn.len(), 1);
4505         assert_eq!(spend_txn[0].input.len(), 1);
4506         check_spends!(spend_txn[0], node_txn[0]);
4507         assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
4508 }
4509
4510 #[test]
4511 fn test_claim_on_remote_sizeable_push_msat() {
4512         // Same test as the previous one, but on the remote commitment tx, as per_commitment_point registration differs depending on whether you're the funder or fundee, and the
4513         // to_remote output is encumbered by a P2WPKH
4514         let chanmon_cfgs = create_chanmon_cfgs(2);
4515         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4516         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4517         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4518
4519         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
4520         nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
4521         check_closed_broadcast!(nodes[0], true);
4522         check_added_monitors!(nodes[0], 1);
4523         check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
4524
4525         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4526         assert_eq!(node_txn.len(), 1);
4527         check_spends!(node_txn[0], chan.3);
4528         assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
4529
4530         mine_transaction(&nodes[1], &node_txn[0]);
4531         check_closed_broadcast!(nodes[1], true);
4532         check_added_monitors!(nodes[1], 1);
4533         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4534         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4535
4536         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4537         assert_eq!(spend_txn.len(), 1);
4538         check_spends!(spend_txn[0], node_txn[0]);
4539 }
4540
4541 #[test]
4542 fn test_claim_on_remote_revoked_sizeable_push_msat() {
4543         // Same test as the previous one, but on the remote revoked commitment tx, as per_commitment_point registration differs depending on whether you're the funder or fundee, and the
4544         // to_remote output is encumbered by a P2WPKH
4545
4546         let chanmon_cfgs = create_chanmon_cfgs(2);
4547         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4548         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4549         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4550
4551         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000);
4552         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4553         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2);
4554         assert_eq!(revoked_local_txn[0].input.len(), 1);
4555         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
4556
4557         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4558         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4559         check_closed_broadcast!(nodes[1], true);
4560         check_added_monitors!(nodes[1], 1);
4561         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4562
4563         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4564         mine_transaction(&nodes[1], &node_txn[0]);
4565         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4566
4567         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4568         assert_eq!(spend_txn.len(), 3);
4569         check_spends!(spend_txn[0], revoked_local_txn[0]); // to_remote output on revoked remote commitment_tx
4570         check_spends!(spend_txn[1], node_txn[0]);
4571         check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[0]); // Both outputs
4572 }
4573
4574 #[test]
4575 fn test_static_spendable_outputs_preimage_tx() {
4576         let chanmon_cfgs = create_chanmon_cfgs(2);
4577         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4578         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4579         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4580
4581         // Create some initial channels
4582         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4583
4584         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
4585
4586         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4587         assert_eq!(commitment_tx[0].input.len(), 1);
4588         assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
4589
4590         // Settle A's commitment tx on B's chain
4591         nodes[1].node.claim_funds(payment_preimage);
4592         expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
4593         check_added_monitors!(nodes[1], 1);
4594         mine_transaction(&nodes[1], &commitment_tx[0]);
4595         check_added_monitors!(nodes[1], 1);
4596         let events = nodes[1].node.get_and_clear_pending_msg_events();
4597         match events[0] {
4598                 MessageSendEvent::UpdateHTLCs { .. } => {},
4599                 _ => panic!("Unexpected event"),
4600         }
4601         match events[1] {
4602                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4603                 _ => panic!("Unexpected event"),
4604         }
4605
4606         // Check B's monitor was able to send back output descriptor event for preimage tx on A's commitment tx
4607         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: preimage tx
4608         assert_eq!(node_txn.len(), 1);
4609         check_spends!(node_txn[0], commitment_tx[0]);
4610         assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
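        // The last witness element is the HTLC redeem script, so its length tells us whether an
        // offered or an accepted HTLC is being claimed here.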
4611
4612         mine_transaction(&nodes[1], &node_txn[0]);
4613         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4614         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4615
4616         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4617         assert_eq!(spend_txn.len(), 1);
4618         check_spends!(spend_txn[0], node_txn[0]);
4619 }
4620
4621 #[test]
4622 fn test_static_spendable_outputs_timeout_tx() {
4623         let chanmon_cfgs = create_chanmon_cfgs(2);
4624         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4625         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4626         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4627
4628         // Create some initial channels
4629         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4630
4631         // Rebalance the channel a bit by relaying one payment across it ...
4632         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
4633
4634         let (_, our_payment_hash, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000);
4635
4636         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4637         assert_eq!(commitment_tx[0].input.len(), 1);
4638         assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
4639
4640         // Settle A's commitment tx on B's chain
4641         mine_transaction(&nodes[1], &commitment_tx[0]);
4642         check_added_monitors!(nodes[1], 1);
4643         let events = nodes[1].node.get_and_clear_pending_msg_events();
4644         match events[0] {
4645                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4646                 _ => panic!("Unexpected event"),
4647         }
4648         connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
4649
4650         // Check B's monitor was able to send back output descriptor event for timeout tx on A's commitment tx
4651         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4652         assert_eq!(node_txn.len(), 1); // ChannelMonitor: timeout tx
4653         check_spends!(node_txn[0], commitment_tx[0].clone());
4654         assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4655
4656         mine_transaction(&nodes[1], &node_txn[0]);
4657         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4658         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4659         expect_payment_failed!(nodes[1], our_payment_hash, false);
4660
4661         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4662         assert_eq!(spend_txn.len(), 3); // SpendableOutputs: remote commitment tx to_remote output, timeout tx output, plus one tx sweeping both
4663         check_spends!(spend_txn[0], commitment_tx[0]);
4664         check_spends!(spend_txn[1], node_txn[0]);
4665         check_spends!(spend_txn[2], node_txn[0], commitment_tx[0]); // All outputs
4666 }
4667
4668 #[test]
4669 fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
4670         let chanmon_cfgs = create_chanmon_cfgs(2);
4671         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4672         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4673         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4674
4675         // Create some initial channels
4676         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4677
4678         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4679         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
4680         assert_eq!(revoked_local_txn[0].input.len(), 1);
4681         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4682
4683         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4684
4685         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4686         check_closed_broadcast!(nodes[1], true);
4687         check_added_monitors!(nodes[1], 1);
4688         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4689
4690         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4691         assert_eq!(node_txn.len(), 1);
4692         assert_eq!(node_txn[0].input.len(), 2);
4693         check_spends!(node_txn[0], revoked_local_txn[0]);
4694
4695         mine_transaction(&nodes[1], &node_txn[0]);
4696         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4697
4698         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4699         assert_eq!(spend_txn.len(), 1);
4700         check_spends!(spend_txn[0], node_txn[0]);
4701 }
4702
4703 #[test]
4704 fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
4705         let mut chanmon_cfgs = create_chanmon_cfgs(2);
4706         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
4707         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4708         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4709         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4710
4711         // Create some initial channels
4712         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4713
4714         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4715         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
4716         assert_eq!(revoked_local_txn[0].input.len(), 1);
4717         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4718
4719         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4720
4721         // A will generate HTLC-Timeout from revoked commitment tx
4722         mine_transaction(&nodes[0], &revoked_local_txn[0]);
4723         check_closed_broadcast!(nodes[0], true);
4724         check_added_monitors!(nodes[0], 1);
4725         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4726         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
4727
4728         let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4729         assert_eq!(revoked_htlc_txn.len(), 1);
4730         assert_eq!(revoked_htlc_txn[0].input.len(), 1);
4731         assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
4732         check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
4733         assert_ne!(revoked_htlc_txn[0].lock_time, LockTime::ZERO); // HTLC-Timeout
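        // Only HTLC-Timeout transactions are CLTV-locked; HTLC-Success transactions carry a zero
        // lock time, so a non-zero lock time identifies this as the timeout claim.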
4734
4735         // B will generate justice tx from A's revoked commitment/HTLC tx
4736         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
4737         check_closed_broadcast!(nodes[1], true);
4738         check_added_monitors!(nodes[1], 1);
4739         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4740
4741         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4742         assert_eq!(node_txn.len(), 2); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs
4743         // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
4744         // including the one already spent by revoked_htlc_txn[0]. That's OK, we'll spend with valid
4745         // transactions next...
4746         assert_eq!(node_txn[0].input.len(), 3);
4747         check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);
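        // Hence three inputs: the two outputs of the revoked commitment tx plus the single output
        // of the revoked HTLC-Timeout tx.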
4748
4749         assert_eq!(node_txn[1].input.len(), 2);
4750         check_spends!(node_txn[1], revoked_local_txn[0], revoked_htlc_txn[0]);
4751         if node_txn[1].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
4752                 assert_ne!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4753         } else {
4754                 assert_eq!(node_txn[1].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
4755                 assert_ne!(node_txn[1].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4756         }
4757
4758         mine_transaction(&nodes[1], &node_txn[1]);
4759         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4760
4761         // Check B's ChannelMonitor was able to generate the right spendable output descriptor
4762         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4763         assert_eq!(spend_txn.len(), 1);
4764         assert_eq!(spend_txn[0].input.len(), 1);
4765         check_spends!(spend_txn[0], node_txn[1]);
4766 }
4767
4768 #[test]
4769 fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
4770         let mut chanmon_cfgs = create_chanmon_cfgs(2);
4771         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
4772         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4773         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4774         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4775
4776         // Create some initial channels
4777         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4778
4779         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4780         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
4781         assert_eq!(revoked_local_txn[0].input.len(), 1);
4782         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4783
4784         // The to-be-revoked commitment tx should have one HTLC and one to_remote output
4785         assert_eq!(revoked_local_txn[0].output.len(), 2);
4786
4787         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4788
4789         // B will generate HTLC-Success from revoked commitment tx
4790         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4791         check_closed_broadcast!(nodes[1], true);
4792         check_added_monitors!(nodes[1], 1);
4793         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4794         let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4795
4796         assert_eq!(revoked_htlc_txn.len(), 1);
4797         assert_eq!(revoked_htlc_txn[0].input.len(), 1);
4798         assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4799         check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
4800
4801         // Check that the unspent one of revoked_local_txn[0]'s two outputs is a P2WPKH:
4802         let unspent_local_txn_output = revoked_htlc_txn[0].input[0].previous_output.vout as usize ^ 1;
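        // With exactly two outputs on the commitment tx, XOR-ing the spent vout with 1 yields the
        // index of the other (unspent) output.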
4803         assert_eq!(revoked_local_txn[0].output[unspent_local_txn_output].script_pubkey.len(), 2 + 20); // P2WPKH
4804
4805         // A will generate justice tx from B's revoked commitment/HTLC tx
4806         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
4807         check_closed_broadcast!(nodes[0], true);
4808         check_added_monitors!(nodes[0], 1);
4809         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4810
4811         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4812         assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success
4813
4814         // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
4815         // including the one already spent by revoked_htlc_txn[0]. That's OK, we'll spend with valid
4816         // transactions next...
4817         assert_eq!(node_txn[0].input.len(), 2);
4818         check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);
4819         if node_txn[0].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
4820                 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4821         } else {
4822                 assert_eq!(node_txn[0].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
4823                 assert_eq!(node_txn[0].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4824         }
4825
4826         assert_eq!(node_txn[1].input.len(), 1);
4827         check_spends!(node_txn[1], revoked_htlc_txn[0]);
4828
4829         mine_transaction(&nodes[0], &node_txn[1]);
4830         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
4831
4832         // Note that nodes[0]'s ChannelMonitor should not have attempted to broadcast any further
4833         // transactions by this point.
4834
4835         // Check A's ChannelMonitor was able to generate the right spendable output descriptor
4836         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
4837         assert_eq!(spend_txn.len(), 3);
4838         assert_eq!(spend_txn[0].input.len(), 1);
4839         check_spends!(spend_txn[0], revoked_local_txn[0]); // spending to_remote output from revoked local tx
4840         assert_ne!(spend_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4841         check_spends!(spend_txn[1], node_txn[1]); // spending justice tx output on the htlc success tx
4842         check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[1]); // Both outputs
4843 }
4844
4845 #[test]
4846 fn test_onchain_to_onchain_claim() {
4847         // Test that in case of channel closure, we detect the state of the output and claim the HTLC
4848         // on the downstream peer's remote commitment tx.
4849         // First, have C claim an HTLC against its own latest commitment transaction.
4850         // Then, broadcast these to B, which should update the monitor downstream on the A<->B
4851         // channel.
4852         // Finally, check that B will claim the HTLC output if A's latest commitment transaction
4853         // gets broadcast.
4854
4855         let chanmon_cfgs = create_chanmon_cfgs(3);
4856         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
4857         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
4858         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
4859
4860         // Create some initial channels
4861         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4862         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4863
4864         // Ensure all nodes are at the same height
4865         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
4866         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
4867         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
4868         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
4869
4870         // Rebalance the network a bit by relaying one payment through all the channels ...
4871         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
4872         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
4873
4874         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
4875         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
4876         check_spends!(commitment_tx[0], chan_2.3);
4877         nodes[2].node.claim_funds(payment_preimage);
4878         expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
4879         check_added_monitors!(nodes[2], 1);
4880         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
4881         assert!(updates.update_add_htlcs.is_empty());
4882         assert!(updates.update_fail_htlcs.is_empty());
4883         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
4884         assert!(updates.update_fail_malformed_htlcs.is_empty());
4885
4886         mine_transaction(&nodes[2], &commitment_tx[0]);
4887         check_closed_broadcast!(nodes[2], true);
4888         check_added_monitors!(nodes[2], 1);
4889         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4890
4891         let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx)
4892         assert_eq!(c_txn.len(), 1);
4893         check_spends!(c_txn[0], commitment_tx[0]);
4894         assert_eq!(c_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4895         assert!(c_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
4896         assert_eq!(c_txn[0].lock_time, LockTime::ZERO); // Success tx
4897
4898         // With C's commitment tx and HTLC-Success tx broadcast on B's chain, B should be able to extract the preimage and update the downstream monitor
4899         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), c_txn[0].clone()]));
4900         check_added_monitors!(nodes[1], 1);
4901         let events = nodes[1].node.get_and_clear_pending_events();
4902         assert_eq!(events.len(), 2);
4903         match events[0] {
4904                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
4905                 _ => panic!("Unexpected event"),
4906         }
4907         match events[1] {
4908                 Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
4909                         assert_eq!(fee_earned_msat, Some(1000));
4910                         assert_eq!(prev_channel_id, Some(chan_1.2));
4911                         assert_eq!(claim_from_onchain_tx, true);
4912                         assert_eq!(next_channel_id, Some(chan_2.2));
4913                         assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
4914                 },
4915                 _ => panic!("Unexpected event"),
4916         }
4917         check_added_monitors!(nodes[1], 1);
4918         let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events();
4919         assert_eq!(msg_events.len(), 3);
4920         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut msg_events);
4921         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut msg_events);
4922
4923         match nodes_2_event {
4924                 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {},
4925                 _ => panic!("Unexpected event"),
4926         }
4927
4928         match nodes_0_event {
4929                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
4930                         assert!(update_add_htlcs.is_empty());
4931                         assert!(update_fail_htlcs.is_empty());
4932                         assert_eq!(update_fulfill_htlcs.len(), 1);
4933                         assert!(update_fail_malformed_htlcs.is_empty());
4934                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
4935                 },
4936                 _ => panic!("Unexpected event"),
4937         };
4938
4939         // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
4940         match msg_events[0] {
4941                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4942                 _ => panic!("Unexpected event"),
4943         }
4944
4945         // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
4946         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4947         mine_transaction(&nodes[1], &commitment_tx[0]);
4948         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4949         let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4950         // ChannelMonitor: HTLC-Success tx
4951         assert_eq!(b_txn.len(), 1);
4952         check_spends!(b_txn[0], commitment_tx[0]);
4953         assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
4954         assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
4955         assert_eq!(b_txn[0].lock_time.to_consensus_u32(), nodes[1].best_block_info().1); // Success tx
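        // Unlike the pre-signed (zero-locktime) holder HTLC-Success above, claims against a
        // counterparty commitment are freshly built by the monitor, which sets the lock time to the
        // current height, presumably to discourage fee sniping.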
4956
4957         check_closed_broadcast!(nodes[1], true);
4958         check_added_monitors!(nodes[1], 1);
4959 }
4960
4961 #[test]
4962 fn test_duplicate_payment_hash_one_failure_one_success() {
4963         // Topology : A --> B --> C --> D
4964         // We route 2 payments with the same hash between B and C; one will time out, the other will be claimed successfully
4965         // Note that because C will refuse to generate two payment secrets for the same payment hash,
4966         // we forward one of the payments onwards to D.
4967         let chanmon_cfgs = create_chanmon_cfgs(4);
4968         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
4969         // When this test was written, the default base fee floated based on the HTLC count.
4970         // It is now fixed, so we simply set the fee to the expected value here.
4971         let mut config = test_default_channel_config();
4972         config.channel_config.forwarding_fee_base_msat = 196;
4973         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs,
4974                 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
4975         let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
4976
4977         create_announced_chan_between_nodes(&nodes, 0, 1);
4978         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4979         create_announced_chan_between_nodes(&nodes, 2, 3);
4980
4981         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
4982         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
4983         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
4984         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
4985         connect_blocks(&nodes[3], node_max_height - nodes[3].best_block_info().1);
4986
4987         let (our_payment_preimage, duplicate_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000);
4988
4989         let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, None).unwrap();
4990         // We reduce the final CLTV here by a somewhat arbitrary constant to keep it under the one-byte
4991         // script push size limit so that the below script length checks match
4992         // ACCEPTED_HTLC_SCRIPT_WEIGHT.
4993         let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV - 40)
4994                 .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap();
4995         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, 800_000);
4996         send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[2], &nodes[3]]], 800_000, duplicate_payment_hash, payment_secret);
4997
4998         let commitment_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
4999         assert_eq!(commitment_txn[0].input.len(), 1);
5000         check_spends!(commitment_txn[0], chan_2.3);
5001
5002         mine_transaction(&nodes[1], &commitment_txn[0]);
5003         check_closed_broadcast!(nodes[1], true);
5004         check_added_monitors!(nodes[1], 1);
5005         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
5006         connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
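        // (We connect an extra MIN_CLTV_EXPIRY_DELTA since the forwarded HTLC's expiry sits one
        // forwarding delta above the reduced final CLTV, so both HTLCs are now expired.)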
5007
5008         let htlc_timeout_tx;
5009         { // Extract one of the two HTLC-Timeout transactions
5010                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5011                 // ChannelMonitor: timeout tx * 2-or-3
5012                 assert!(node_txn.len() == 2 || node_txn.len() == 3);
5013
5014                 check_spends!(node_txn[0], commitment_txn[0]);
5015                 assert_eq!(node_txn[0].input.len(), 1);
5016                 assert_eq!(node_txn[0].output.len(), 1);
5017
5018                 if node_txn.len() > 2 {
5019                         check_spends!(node_txn[1], commitment_txn[0]);
5020                         assert_eq!(node_txn[1].input.len(), 1);
5021                         assert_eq!(node_txn[1].output.len(), 1);
5022                         assert_eq!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
5023
5024                         check_spends!(node_txn[2], commitment_txn[0]);
5025                         assert_eq!(node_txn[2].input.len(), 1);
5026                         assert_eq!(node_txn[2].output.len(), 1);
5027                         assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
5028                 } else {
5029                         check_spends!(node_txn[1], commitment_txn[0]);
5030                         assert_eq!(node_txn[1].input.len(), 1);
5031                         assert_eq!(node_txn[1].output.len(), 1);
5032                         assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
5033                 }
5034
5035                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5036                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5037                 // Assign htlc_timeout_tx to the forwarded HTLC (with value ~800 sats). The received HTLC
5038                 // (with value 900 sats) will be claimed in the below `claim_funds` call.
5039                 if node_txn.len() > 2 {
5040                         assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5041                         htlc_timeout_tx = if node_txn[2].output[0].value < 900 { node_txn[2].clone() } else { node_txn[0].clone() };
5042                 } else {
5043                         htlc_timeout_tx = if node_txn[0].output[0].value < 900 { node_txn[1].clone() } else { node_txn[0].clone() };
5044                 }
5045         }
5046
5047         nodes[2].node.claim_funds(our_payment_preimage);
5048         expect_payment_claimed!(nodes[2], duplicate_payment_hash, 900_000);
5049
5050         mine_transaction(&nodes[2], &commitment_txn[0]);
5051         check_added_monitors!(nodes[2], 2);
5052         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5053         let events = nodes[2].node.get_and_clear_pending_msg_events();
5054         match events[0] {
5055                 MessageSendEvent::UpdateHTLCs { .. } => {},
5056                 _ => panic!("Unexpected event"),
5057         }
5058         match events[1] {
5059                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5060                 _ => panic!("Unexpected event"),
5061         }
5062         let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
5063         assert_eq!(htlc_success_txn.len(), 2); // ChannelMonitor: HTLC-Success txn (*2 due to 2-HTLC outputs)
5064         check_spends!(htlc_success_txn[0], commitment_txn[0]);
5065         check_spends!(htlc_success_txn[1], commitment_txn[0]);
5066         assert_eq!(htlc_success_txn[0].input.len(), 1);
5067         assert_eq!(htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5068         assert_eq!(htlc_success_txn[1].input.len(), 1);
5069         assert_eq!(htlc_success_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5070         assert_ne!(htlc_success_txn[0].input[0].previous_output, htlc_success_txn[1].input[0].previous_output);
5071         assert_ne!(htlc_success_txn[1].input[0].previous_output, htlc_timeout_tx.input[0].previous_output);
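        // htlc_success_txn[1] must claim a different HTLC output than the timeout claim above, so
        // that when it is mined below it settles the HTLC that was not already failed back.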
5072
5073         mine_transaction(&nodes[1], &htlc_timeout_tx);
5074         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5075         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
5076         let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5077         assert!(htlc_updates.update_add_htlcs.is_empty());
5078         assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
5079         let first_htlc_id = htlc_updates.update_fail_htlcs[0].htlc_id;
5080         assert!(htlc_updates.update_fulfill_htlcs.is_empty());
5081         assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
5082         check_added_monitors!(nodes[1], 1);
5083
5084         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
5085         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
5086         {
5087                 commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true);
5088         }
5089         expect_payment_failed_with_update!(nodes[0], duplicate_payment_hash, false, chan_2.0.contents.short_channel_id, true);
5090
5091         // Settle the 2nd HTLC by broadcasting C's HTLC-Success tx on B's chain
5092         mine_transaction(&nodes[1], &htlc_success_txn[1]);
5093         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(196), true, true);
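        // The 196 msat fee matches the forwarding_fee_base_msat fixed in the config above.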
5094         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5095         assert!(updates.update_add_htlcs.is_empty());
5096         assert!(updates.update_fail_htlcs.is_empty());
5097         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
5098         assert_ne!(updates.update_fulfill_htlcs[0].htlc_id, first_htlc_id);
5099         assert!(updates.update_fail_malformed_htlcs.is_empty());
5100         check_added_monitors!(nodes[1], 1);
5101
5102         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
5103         commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false);
5104         expect_payment_sent(&nodes[0], our_payment_preimage, None, true, true);
5105 }
5106
5107 #[test]
5108 fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
5109         let chanmon_cfgs = create_chanmon_cfgs(2);
5110         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5111         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5112         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5113
5114         // Create some initial channels
5115         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5116
5117         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
5118         let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
5119         assert_eq!(local_txn.len(), 1);
5120         assert_eq!(local_txn[0].input.len(), 1);
5121         check_spends!(local_txn[0], chan_1.3);
5122
5123         // Give B knowledge of preimage to be able to generate a local HTLC-Success Tx
5124         nodes[1].node.claim_funds(payment_preimage);
5125         expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
5126         check_added_monitors!(nodes[1], 1);
5127
5128         mine_transaction(&nodes[1], &local_txn[0]);
5129         check_added_monitors!(nodes[1], 1);
5130         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
5131         let events = nodes[1].node.get_and_clear_pending_msg_events();
5132         match events[0] {
5133                 MessageSendEvent::UpdateHTLCs { .. } => {},
5134                 _ => panic!("Unexpected event"),
5135         }
5136         match events[1] {
5137                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5138                 _ => panic!("Unexpected event"),
5139         }
5140         let node_tx = {
5141                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5142                 assert_eq!(node_txn.len(), 1);
5143                 assert_eq!(node_txn[0].input.len(), 1);
5144                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5145                 check_spends!(node_txn[0], local_txn[0]);
5146                 node_txn[0].clone()
5147         };
5148
5149         mine_transaction(&nodes[1], &node_tx);
5150         connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
5151
5152         // Verify that B can spend the output of its own HTLC-Success tx via the SpendableOutputs event its ChannelMonitor generates
5153         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5154         assert_eq!(spend_txn.len(), 1);
5155         assert_eq!(spend_txn[0].input.len(), 1);
5156         check_spends!(spend_txn[0], node_tx);
5157         assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5158 }
5159
5160 fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, announce_latest: bool) {
5161         // Test that we fail backwards the full set of HTLCs we need to when remote broadcasts an
5162         // unrevoked commitment transaction.
5163         // This includes HTLCs which were below the dust threshold as well as HTLCs which were awaiting
5164         // a remote RAA before they could be failed backwards (and combinations thereof).
5165         // We also test duplicate-hash HTLCs by adding two nodes on each side of the target nodes which
5166         // use the same payment hashes.
5167         // Thus, we use a six-node network:
5168         //
5169         // A \         / E
5170         //    - C - D -
5171         // B /         \ F
5172         // And test where C fails back to A/B when D announces its latest commitment transaction
5173         let chanmon_cfgs = create_chanmon_cfgs(6);
5174         let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
5175         // When this test was written, the default base fee floated based on the HTLC count.
5176         // It is now fixed, so we simply set the fee to the expected value here.
5177         let mut config = test_default_channel_config();
5178         config.channel_config.forwarding_fee_base_msat = 196;
5179         let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs,
5180                 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
5181         let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
5182
5183         let _chan_0_2 = create_announced_chan_between_nodes(&nodes, 0, 2);
5184         let _chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
5185         let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
5186         let chan_3_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
5187         let chan_3_5  = create_announced_chan_between_nodes(&nodes, 3, 5);
5188
5189         // Rebalance and check output sanity...
5190         send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
5191         send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000);
5192         assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
5193
5194         let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
5195                 .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context().holder_dust_limit_satoshis;
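        // HTLCs of ds_dust_limit*1000 msat fall below D's dust limit plus the HTLC tx fee, so they
        // never materialize as outputs on D's commitment transaction.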
5196         // 0th HTLC:
5197         let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5198         // 1st HTLC:
5199         let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5200         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
5201         // 2nd HTLC:
5202         send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5203         // 3rd HTLC:
5204         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5205         // 4th HTLC:
5206         let (_, payment_hash_3, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5207         // 5th HTLC:
5208         let (_, payment_hash_4, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5209         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
5210         // 6th HTLC:
5211         send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200, None).unwrap());
5212         // 7th HTLC:
5213         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200, None).unwrap());
5214
5215         // 8th HTLC:
5216         let (_, payment_hash_5, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5217         // 9th HTLC:
5218         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
5219         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5220
5221         // 10th HTLC:
5222         let (_, payment_hash_6, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5223         // 11th HTLC:
5224         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
5225         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200, None).unwrap());
5226
5227         // Double-check that six of the new HTLCs were added:
5228         // we now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie,
5229         // with the to_local and to_remote outputs, the commitment tx has 8 outputs, the 6 under-dust HTLCs not included).
5230         assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2).len(), 1);
5231         assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 8);
5232
5233         // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
5234         // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
5235         nodes[4].node.fail_htlc_backwards(&payment_hash_1);
5236         nodes[4].node.fail_htlc_backwards(&payment_hash_3);
5237         nodes[4].node.fail_htlc_backwards(&payment_hash_5);
5238         nodes[4].node.fail_htlc_backwards(&payment_hash_6);
5239         check_added_monitors!(nodes[4], 0);
5240
5241         let failed_destinations = vec![
5242                 HTLCDestination::FailedPayment { payment_hash: payment_hash_1 },
5243                 HTLCDestination::FailedPayment { payment_hash: payment_hash_3 },
5244                 HTLCDestination::FailedPayment { payment_hash: payment_hash_5 },
5245                 HTLCDestination::FailedPayment { payment_hash: payment_hash_6 },
5246         ];
5247         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations);
5248         check_added_monitors!(nodes[4], 1);
5249
5250         let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
5251         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[0]);
5252         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[1]);
5253         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[2]);
5254         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[3]);
5255         commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false);
5256
5257         // Fail 3rd below-dust and 7th above-dust HTLCs
5258         nodes[5].node.fail_htlc_backwards(&payment_hash_2);
5259         nodes[5].node.fail_htlc_backwards(&payment_hash_4);
5260         check_added_monitors!(nodes[5], 0);
5261
5262         let failed_destinations_2 = vec![
5263                 HTLCDestination::FailedPayment { payment_hash: payment_hash_2 },
5264                 HTLCDestination::FailedPayment { payment_hash: payment_hash_4 },
5265         ];
5266         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2);
5267         check_added_monitors!(nodes[5], 1);
5268
5269         let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
5270         nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[0]);
5271         nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]);
5272         commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
5273
5274         let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
5275
5276         // After the 4 and 2 removes above in nodes[4] and nodes[5] respectively, nodes[3] should receive 6 HTLCHandlingFailed events
5277         let failed_destinations_3 = vec![
5278                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5279                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5280                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5281                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5282                 HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
5283                 HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
5284         ];
5285         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3);
5286         check_added_monitors!(nodes[3], 1);
5287         let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
5288         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
5289         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[1]);
5290         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[2]);
5291         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[3]);
5292         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[4]);
5293         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[5]);
5294         if deliver_last_raa {
5295                 commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false);
5296         } else {
5297                 let _cs_last_raa = commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false, true, false, true);
5298         }
5299
5300         // D's latest commitment transaction now contains 1st + 2nd + 9th HTLCs (implicitly, they're
5301         // below the dust limit) and the 5th + 6th + 11th HTLCs. It has failed back the 0th, 3rd, 4th,
5302         // 7th, 8th, and 10th, but as we haven't yet delivered the final RAA to C, the fails haven't
5303         // propagated back to A/B yet (and D has two unrevoked commitment transactions).
5304         //
5305         // We now broadcast the latest commitment transaction, which *should* result in failures for
5306         // the 0th, 1st, 2nd, 3rd, 4th, 7th, 8th, 9th, and 10th HTLCs, ie all the below-dust HTLCs and
5307         // the non-broadcast above-dust HTLCs.
5308         //
5309         // Alternatively, we may broadcast the previous commitment transaction, which should only
5310         // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
5311         let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
5312
5313         if announce_latest {
5314                 mine_transaction(&nodes[2], &ds_last_commitment_tx[0]);
5315         } else {
5316                 mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
5317         }
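	// Once the commitment transaction confirms, C sees a ChannelClosed event. If the last RAA was
	// delivered, the event queue also holds a PendingHTLCsForwardable event plus the
	// HTLCHandlingFailed events for the HTLCs that RAA unblocked; the ChannelClosed event comes
	// last either way, hence the events.last() below.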
	let events = nodes[2].node.get_and_clear_pending_events();
	let close_event = if deliver_last_raa {
		assert_eq!(events.len(), 2 + 6);
		events.last().clone().unwrap()
	} else {
		assert_eq!(events.len(), 1);
		events.last().clone().unwrap()
	};
	match close_event {
		Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
		_ => panic!("Unexpected event"),
	}

	connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
	check_closed_broadcast!(nodes[2], true);
	if deliver_last_raa {
		expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);

		let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
		expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
	} else {
		let expected_destinations: Vec<HTLCDestination> = if announce_latest {
			repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect()
		} else {
			repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect()
		};

		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations);
	}
	check_added_monitors!(nodes[2], 3);

	let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
	assert_eq!(cs_msgs.len(), 2);
	let mut a_done = false;
	for msg in cs_msgs {
		match msg {
			MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
				// Both under-dust HTLCs and the one above-dust HTLC that we had already failed
				// should be failed-backwards here.
				let target = if *node_id == nodes[0].node.get_our_node_id() {
					// If announce_latest, expect 0th, 1st, 4th, 8th, 10th HTLCs, else only 0th, 1st, 10th below-dust HTLCs
					for htlc in &updates.update_fail_htlcs {
						assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 6 || if announce_latest { htlc.htlc_id == 3 || htlc.htlc_id == 5 } else { false });
					}
					assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 5 } else { 3 });
					assert!(!a_done);
					a_done = true;
					&nodes[0]
				} else {
					// If announce_latest, expect 2nd, 3rd, 7th, 9th HTLCs, else only 2nd, 3rd, 9th below-dust HTLCs
					for htlc in &updates.update_fail_htlcs {
						assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 5 || if announce_latest { htlc.htlc_id == 4 } else { false });
					}
					assert_eq!(*node_id, nodes[1].node.get_our_node_id());
					assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 4 } else { 3 });
					&nodes[1]
				};
				target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
				target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[1]);
				target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[2]);
				if announce_latest {
					target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[3]);
					if *node_id == nodes[0].node.get_our_node_id() {
						target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[4]);
					}
				}
				commitment_signed_dance!(target, nodes[2], updates.commitment_signed, false, true);
			},
			_ => panic!("Unexpected event"),
		}
	}

	let as_events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 });
	let mut as_failds = HashSet::new();
	let mut as_updates = 0;
	for event in as_events.iter() {
		if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
			assert!(as_failds.insert(*payment_hash));
			if *payment_hash != payment_hash_2 {
				assert_eq!(*payment_failed_permanently, deliver_last_raa);
			} else {
				assert!(!payment_failed_permanently);
			}
			if let PathFailure::OnPath { network_update: Some(_) } = failure {
				as_updates += 1;
			}
		} else if let &Event::PaymentFailed { .. } = event {
		} else { panic!("Unexpected event"); }
	}
	assert!(as_failds.contains(&payment_hash_1));
	assert!(as_failds.contains(&payment_hash_2));
	if announce_latest {
		assert!(as_failds.contains(&payment_hash_3));
		assert!(as_failds.contains(&payment_hash_5));
	}
	assert!(as_failds.contains(&payment_hash_6));

	let bs_events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 });
	let mut bs_failds = HashSet::new();
	let mut bs_updates = 0;
	for event in bs_events.iter() {
		if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
			assert!(bs_failds.insert(*payment_hash));
			if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
				assert_eq!(*payment_failed_permanently, deliver_last_raa);
			} else {
				assert!(!payment_failed_permanently);
			}
			if let PathFailure::OnPath { network_update: Some(_) } = failure {
				bs_updates += 1;
			}
		} else if let &Event::PaymentFailed { .. } = event {
		} else { panic!("Unexpected event"); }
	}
	assert!(bs_failds.contains(&payment_hash_1));
	assert!(bs_failds.contains(&payment_hash_2));
	if announce_latest {
		assert!(bs_failds.contains(&payment_hash_4));
	}
	assert!(bs_failds.contains(&payment_hash_5));

	// For each HTLC which was not failed back by the normal process (ie deliver_last_raa), we
	// should get a NetworkUpdate. A should have gotten 4 HTLCs which were failed-back due to
	// unknown-preimage-etc, B should have gotten 2. Thus, in the
	// announce_latest && deliver_last_raa case, we should have 5-4=1 and 4-2=2 NetworkUpdates.
	assert_eq!(as_updates, if deliver_last_raa { 1 } else if !announce_latest { 3 } else { 5 });
	assert_eq!(bs_updates, if deliver_last_raa { 2 } else if !announce_latest { 3 } else { 4 });
}

#[test]
fn test_fail_backwards_latest_remote_announce_a() {
	do_test_fail_backwards_unrevoked_remote_announce(false, true);
}

#[test]
fn test_fail_backwards_latest_remote_announce_b() {
	do_test_fail_backwards_unrevoked_remote_announce(true, true);
}

#[test]
fn test_fail_backwards_previous_remote_announce() {
	do_test_fail_backwards_unrevoked_remote_announce(false, false);
	// Note that (true, false) doesn't make sense as it implies we announce a revoked state, which
	// is tested for in test_commitment_revoked_fail_backward_exhaustive()
}
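
// For reference, the four (deliver_last_raa, announce_latest) combinations of
// do_test_fail_backwards_unrevoked_remote_announce:
//   (false, true)  - test_fail_backwards_latest_remote_announce_a
//   (true,  true)  - test_fail_backwards_latest_remote_announce_b
//   (false, false) - test_fail_backwards_previous_remote_announce
//   (true,  false) - announces a revoked commitment; exercised instead by
//                    test_commitment_revoked_fail_backward_exhaustive()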

#[test]
fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Create an initial channel
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);

	let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9000000);
	let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
	assert_eq!(local_txn[0].input.len(), 1);
	check_spends!(local_txn[0], chan_1.3);

	// Time out the HTLC on A's chain so that it can generate an HTLC-Timeout tx
	mine_transaction(&nodes[0], &local_txn[0]);
	check_closed_broadcast!(nodes[0], true);
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
	connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires

	let htlc_timeout = {
		let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_txn.len(), 1);
		assert_eq!(node_txn[0].input.len(), 1);
		assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
		check_spends!(node_txn[0], local_txn[0]);
		node_txn[0].clone()
	};

	mine_transaction(&nodes[0], &htlc_timeout);
	connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
	expect_payment_failed!(nodes[0], our_payment_hash, false);

	// Verify that A is able to spend its own HTLC-Timeout tx thanks to the spendable output event
	// given back by its ChannelMonitor
	let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
	assert_eq!(spend_txn.len(), 3);
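	// check_spendable_outputs! builds one transaction per SpendableOutputDescriptor plus one
	// aggregated spend: spend_txn[0] sweeps the delayed to_self output of the commitment tx,
	// spend_txn[1] sweeps the HTLC-Timeout output (both only spendable after the BREAKDOWN_TIMEOUT
	// CSV delay, hence the sequence checks below), and spend_txn[2] sweeps both at once.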
	check_spends!(spend_txn[0], local_txn[0]);
	assert_eq!(spend_txn[1].input.len(), 1);
	check_spends!(spend_txn[1], htlc_timeout);
	assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
	assert_eq!(spend_txn[2].input.len(), 2);
	check_spends!(spend_txn[2], local_txn[0], htlc_timeout);
	assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
		spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
}

#[test]
fn test_key_derivation_params() {
	// This test is a copy of test_dynamic_spendable_outputs_local_htlc_timeout_tx, with a key
	// manager rotation to test that the `channel_keys_id` returned in
	// [`SpendableOutputDescriptor::DelayedPaymentOutput`] lets us re-derive the channel key set
	// and, from it, a `delayed_payment_key`.
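	//
	// Concretely: node 0 below is built with an explicit `seed`, the channel is force-closed
	// on-chain, and `check_spendable_outputs!` is then handed a *fresh* `TestKeysInterface`
	// constructed from the same seed, proving the descriptors alone carry enough information to
	// re-derive the spending keys.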

	let chanmon_cfgs = create_chanmon_cfgs(3);

	// We manually create the node configuration so we can back up the seed.
	let seed = [42; 32];
	let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
	let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
	let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger));
	let scorer = RwLock::new(test_utils::TestScorer::new());
	let router = test_utils::TestRouter::new(network_graph.clone(), &scorer);
	let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
	let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	node_cfgs.remove(0);
	node_cfgs.insert(0, node);

	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	// Create some initial channels
	// Create a dummy channel to advance index by one and thus test re-derivation correctness
	// for node 0
	let chan_0 = create_announced_chan_between_nodes(&nodes, 0, 2);
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
	assert_ne!(chan_0.3.output[0].script_pubkey, chan_1.3.output[0].script_pubkey);

	// Ensure all nodes are at the same height
	let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
	connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
	connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
	connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);

	let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9000000);
	let local_txn_0 = get_local_commitment_txn!(nodes[0], chan_0.2);
	let local_txn_1 = get_local_commitment_txn!(nodes[0], chan_1.2);
	assert_eq!(local_txn_1[0].input.len(), 1);
	check_spends!(local_txn_1[0], chan_1.3);

	// We check that the funding pubkeys are unique across channels
	let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][36..69]));
	let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][36..69]));
	if from_0_funding_key_0 == from_1_funding_key_0
	    || from_0_funding_key_0 == from_1_funding_key_1
	    || from_0_funding_key_1 == from_1_funding_key_0
	    || from_0_funding_key_1 == from_1_funding_key_1 {
		panic!("Funding pubkeys aren't unique");
	}

	// Time out the HTLC on A's chain so that it can generate an HTLC-Timeout tx
	mine_transaction(&nodes[0], &local_txn_1[0]);
	connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
	check_closed_broadcast!(nodes[0], true);
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);

	let htlc_timeout = {
		let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_txn.len(), 1);
		assert_eq!(node_txn[0].input.len(), 1);
		assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
		check_spends!(node_txn[0], local_txn_1[0]);
		node_txn[0].clone()
	};

	mine_transaction(&nodes[0], &htlc_timeout);
	connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
	expect_payment_failed!(nodes[0], our_payment_hash, false);

	// Verify that A is able to spend its own HTLC-Timeout tx thanks to the spendable output event
	// given back by its ChannelMonitor, using a fresh KeysInterface built from the backed-up seed
	let new_keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
	let spend_txn = check_spendable_outputs!(nodes[0], new_keys_manager);
	assert_eq!(spend_txn.len(), 3);
	check_spends!(spend_txn[0], local_txn_1[0]);
	assert_eq!(spend_txn[1].input.len(), 1);
	check_spends!(spend_txn[1], htlc_timeout);
	assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
	assert_eq!(spend_txn[2].input.len(), 2);
	check_spends!(spend_txn[2], local_txn_1[0], htlc_timeout);
	assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
		spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
}

#[test]
fn test_static_output_closing_tx() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	send_payment(&nodes[0], &[&nodes[1]], 8000000);
	let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;

	mine_transaction(&nodes[0], &closing_tx);
	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);

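	// A cooperative close pays each side directly to a key it controls with no CSV delay, so each
	// node gets exactly one immediately-spendable sweep of its closing output.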
	let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
	assert_eq!(spend_txn.len(), 1);
	check_spends!(spend_txn[0], closing_tx);

	mine_transaction(&nodes[1], &closing_tx);
	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);

	let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
	assert_eq!(spend_txn.len(), 1);
	check_spends!(spend_txn[0], closing_tx);
}

fn do_htlc_claim_local_commitment_only(use_dust: bool) {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 });

	// Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being
	// present in B's local commitment transaction, but none of A's commitment transactions.
	nodes[1].node.claim_funds(payment_preimage);
	check_added_monitors!(nodes[1], 1);
	expect_payment_claimed!(nodes[1], payment_hash, if use_dust { 50000 } else { 3_000_000 });

	let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
	expect_payment_sent(&nodes[0], payment_preimage, None, false, false);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
	check_added_monitors!(nodes[1], 1);

	let starting_block = nodes[1].best_block_info();
	let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
	for _ in starting_block.1 + 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + starting_block.1 + 2 {
		connect_block(&nodes[1], &block);
		block.header.prev_blockhash = block.block_hash();
	}
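	// We've now connected blocks to within CLTV_CLAIM_BUFFER of the HTLC's expiry, so B's
	// ChannelMonitor force-closes to claim the HTLC on-chain: a non-dust HTLC is claimed via an
	// HTLC-Success transaction, while a dust HTLC has no output to claim (HTLCType::NONE).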
	test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
	check_closed_broadcast!(nodes[1], true);
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
}

fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if use_dust { 50000 } else { 3000000 });
	nodes[0].node.send_payment_with_route(&route, payment_hash,
		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);

	let _as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	// As far as A is concerned, the HTLC is now present only in the latest remote commitment
	// transaction; however, it is not in A's latest local commitment, so A can simply broadcast
	// that to "time out" the HTLC.

	let starting_block = nodes[1].best_block_info();
	let mut block = create_dummy_block(starting_block.0, 42, Vec::new());

	for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 {
		connect_block(&nodes[0], &block);
		block.header.prev_blockhash = block.block_hash();
	}
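	// Past the HTLC's CLTV expiry plus LATENCY_GRACE_PERIOD_BLOCKS, A force-closes with its local
	// commitment, which never contained the HTLC, so no HTLC claim transaction follows.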
	test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
	check_closed_broadcast!(nodes[0], true);
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
}

fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	// Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present
	// in B's previous (unrevoked) commitment transaction, but none of A's commitment transactions.
	// Also optionally test that we *don't* fail the channel in case the commitment transaction was
	// actually revoked.
	let htlc_value = if use_dust { 50000 } else { 3000000 };
	let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
	nodes[1].node.fail_htlc_backwards(&our_payment_hash);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
	check_added_monitors!(nodes[1], 1);

	let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
	check_added_monitors!(nodes[1], 1);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.1);
	check_added_monitors!(nodes[1], 1);
	let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

	if check_revoke_no_close {
		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
		check_added_monitors!(nodes[0], 1);
	}

	let starting_block = nodes[1].best_block_info();
	let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
	for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 {
		connect_block(&nodes[0], &block);
		block.header.prev_blockhash = block.block_hash();
	}
	if !check_revoke_no_close {
		test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
		check_closed_broadcast!(nodes[0], true);
		check_added_monitors!(nodes[0], 1);
		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
	} else {
		expect_payment_failed!(nodes[0], our_payment_hash, true);
	}
}

// Test that we close channels on-chain when broadcastable HTLCs reach their timeout window.
// There are only a few cases to test here:
//  * it's not really normative behavior, but we test that below-dust HTLCs "included" in
//    broadcastable commitment transactions result in channel closure,
//  * it's included in an unrevoked-but-previous remote commitment transaction,
//  * it's included in the latest remote or local commitment transactions.
// We test each of the three possible commitment transactions individually and use both dust and
// non-dust HTLCs.
// Note that we don't bother testing both outbound and inbound HTLC failures for each case, and we
// assume they are handled the same across all six cases, as both outbound and inbound failures are
// tested for at least one of the cases in other tests.
#[test]
fn htlc_claim_single_commitment_only_a() {
	do_htlc_claim_local_commitment_only(true);
	do_htlc_claim_local_commitment_only(false);

	do_htlc_claim_current_remote_commitment_only(true);
	do_htlc_claim_current_remote_commitment_only(false);
}

#[test]
fn htlc_claim_single_commitment_only_b() {
	do_htlc_claim_previous_remote_commitment_only(true, false);
	do_htlc_claim_previous_remote_commitment_only(false, false);
	do_htlc_claim_previous_remote_commitment_only(true, true);
	do_htlc_claim_previous_remote_commitment_only(false, true);
}

#[test]
#[should_panic]
fn bolt2_open_channel_sending_node_checks_part1() { // This test needs to be on its own as we are catching a panic
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	// Force duplicate randomness for every get-random call
	for node in nodes.iter() {
		*node.keys_manager.override_random_bytes.lock().unwrap() = Some([0; 32]);
	}

	// BOLT #2 spec: Sending node must ensure temporary_channel_id is unique from any other channel ID with the same peer.
	let channel_value_satoshis = 10000;
	let push_msat = 10001;
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
	let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
	get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());

	// Create a second channel with the same random values. This used to panic due to a colliding
	// channel_id, but now panics due to a colliding outbound SCID alias.
	assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
}

#[test]
fn bolt2_open_channel_sending_node_checks_part2() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// BOLT #2 spec: Sending node must set funding_satoshis to less than 2^24 satoshis
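	// Note that `2^24` below is Rust's XOR operator, not exponentiation, so the value is actually
	// 26 satoshis; create_channel is therefore rejected because 26 sat cannot fund a usable
	// channel at all, not because of the 2^24 non-wumbo cap named in the spec quote above.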
	let channel_value_satoshis = 2^24;
	let push_msat = 10001;
	assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());

	// BOLT #2 spec: Sending node must set push_msat to equal or less than 1000 * funding_satoshis
	let channel_value_satoshis = 10000;
	// Test when push_msat is one msat more than 1000 * funding_satoshis.
	let push_msat = 1000 * channel_value_satoshis + 1;
	assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());

	// BOLT #2 spec: Sending node must set channel_reserve_satoshis greater than or equal to dust_limit_satoshis
	let channel_value_satoshis = 10000;
	let push_msat = 10001;
	assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_ok()); // Create a valid channel
	let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	assert!(node0_to_1_send_open_channel.channel_reserve_satoshis >= node0_to_1_send_open_channel.dust_limit_satoshis);

	// BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0
	// Only the least-significant bit of channel_flags is currently defined, so channel_flags can
	// only take one of two values: 0 or 1.
	assert!(node0_to_1_send_open_channel.channel_flags <= 1);

	// BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver.
	assert!(BREAKDOWN_TIMEOUT > 0);
	assert!(node0_to_1_send_open_channel.to_self_delay == BREAKDOWN_TIMEOUT);

	// BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within.
	let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
	assert_eq!(node0_to_1_send_open_channel.chain_hash, chain_hash);

	// BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys.
	assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.funding_pubkey.serialize()).is_ok());
	assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.revocation_basepoint.serialize()).is_ok());
	assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.htlc_basepoint.serialize()).is_ok());
	assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.payment_point.serialize()).is_ok());
	assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.delayed_payment_basepoint.serialize()).is_ok());
}

#[test]
fn bolt2_open_channel_sane_dust_limit() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let channel_value_satoshis = 1000000;
	let push_msat = 10001;
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
	let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	node0_to_1_send_open_channel.dust_limit_satoshis = 547;
	node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;

	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
	let events = nodes[1].node.get_and_clear_pending_msg_events();
	let err_msg = match events[0] {
		MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
			msg.clone()
		},
		_ => panic!("Unexpected event"),
	};
	assert_eq!(err_msg.data, "dust_limit_satoshis (547) is greater than the implementation limit (546)");
}

// Test that if we fail to send an HTLC that is being freed from the holding cell, and the HTLC
// originated from our node, its failure is surfaced to the user. We trigger this failure-to-free
// by increasing our fee while the HTLC is in the holding cell, such that the HTLC is no longer
// affordable once it's freed.
#[test]
fn test_fail_holding_cell_htlc_upon_free() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);

	// First nodes[0] generates an update_fee, setting the channel's
	// pending_update_fee.
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
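	// Bumping the estimator's feerate means the next timer tick sees a feerate mismatch and
	// queues an update_fee (plus commitment_signed) for the channel.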
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let (update_msg, commitment_signed) = match events[0] {
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
	let channel_reserve = chan_stat.channel_reserve_msat;
	let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
	let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);

	// 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve.
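	// That is: spendable = balance - reserve - 2 * commit_tx_fee(feerate, num_htlcs + 1), where
	// the 2* mirrors FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE and the +1 reserves commitment weight
	// for one more HTLC on top of the one we're about to add.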
	let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);

	// Send a payment which passes reserve checks but gets stuck in the holding cell.
	nodes[0].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
	assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);

	// Flush the pending fee update.
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let (as_revoke_and_ack, _) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
	check_added_monitors!(nodes[0], 1);

	// Upon receipt of the RAA, there will be an attempt to resend the holding cell
	// HTLC, but now that the fee has been raised the payment will fail, causing
	// us to surface its failure to the user.
	chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
	assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
	nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1);

	// Check that the payment failed to be sent out.
	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 2);
	match &events[0] {
		&Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
			assert_eq!(PaymentId(our_payment_hash.0), *payment_id.as_ref().unwrap());
			assert_eq!(our_payment_hash.clone(), *payment_hash);
			assert_eq!(*payment_failed_permanently, false);
			assert_eq!(*short_channel_id, Some(route.paths[0].hops[0].short_channel_id));
		},
		_ => panic!("Unexpected event"),
	}
	match &events[1] {
		&Event::PaymentFailed { ref payment_hash, .. } => {
			assert_eq!(our_payment_hash.clone(), *payment_hash);
		},
		_ => panic!("Unexpected event"),
	}
}
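
// A minimal, compiled-out sketch of the holding-cell affordability arithmetic the tests above and
// below rely on. The constants and names here are illustrative assumptions (BOLT #3 non-anchor
// weights), not LDK API; the real calculation lives in commit_tx_fee_msat and friends.
#[cfg(any())]
fn illustrative_max_sendable_msat(
	balance_msat: u64, reserve_msat: u64, feerate_per_kw: u64, num_htlcs: u64,
) -> u64 {
	// BOLT #3 non-anchor commitment weights: 724 weight base, 172 weight per HTLC output.
	let commit_weight = 724 + (num_htlcs + 1) * 172;
	// Round the commitment fee down to whole satoshis, then express it in msat.
	let commit_fee_msat = feerate_per_kw * commit_weight / 1000 * 1000;
	// Double the fee as a buffer against feerate spikes, mirroring
	// FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE.
	balance_msat.saturating_sub(reserve_msat).saturating_sub(2 * commit_fee_msat)
}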

// Test that if multiple HTLCs are released from the holding cell and one is
// valid but the other is no longer valid upon release, the valid HTLC can be
// successfully completed while the other one fails as expected.
#[test]
fn test_free_and_fail_holding_cell_htlcs() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);

	// First nodes[0] generates an update_fee, setting the channel's
	// pending_update_fee.
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 200;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let (update_msg, commitment_signed) = match events[0] {
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
	let channel_reserve = chan_stat.channel_reserve_msat;
	let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
	let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);

	// 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve.
	let amt_1 = 20000;
	let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features) - amt_1;
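	// amt_2 consumes essentially all of the remaining spendable balance (two pending HTLCs plus
	// the fee-spike allowance; cf. the sketch above), so once the pending fee update takes effect
	// the second HTLC is no longer affordable.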
5990         let (route_1, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_1);
5991         let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_2);
5992
5993         // Send 2 payments which pass reserve checks but get stuck in the holding cell.
5994         nodes[0].node.send_payment_with_route(&route_1, payment_hash_1,
5995                 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
5996         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5997         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1);
5998         let payment_id_2 = PaymentId(nodes[0].keys_manager.get_secure_random_bytes());
5999         nodes[0].node.send_payment_with_route(&route_2, payment_hash_2,
6000                 RecipientOnionFields::secret_only(payment_secret_2), payment_id_2).unwrap();
6001         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6002         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2);
6003
6004         // Flush the pending fee update.
6005         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
6006         let (revoke_and_ack, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6007         check_added_monitors!(nodes[1], 1);
6008         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_and_ack);
6009         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
6010         check_added_monitors!(nodes[0], 2);
6011
6012         // Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs,
6013         // but now that the fee has been raised the second payment will now fail, causing us
6014         // to surface its failure to the user. The first payment should succeed.
6015         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6016         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
6017         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1);
6018
6019         // Check that the second payment failed to be sent out.
6020         let events = nodes[0].node.get_and_clear_pending_events();
6021         assert_eq!(events.len(), 2);
6022         match &events[0] {
6023                 &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
6024                         assert_eq!(payment_id_2, *payment_id.as_ref().unwrap());
6025                         assert_eq!(payment_hash_2.clone(), *payment_hash);
6026                         assert_eq!(*payment_failed_permanently, false);
6027                         assert_eq!(*short_channel_id, Some(route_2.paths[0].hops[0].short_channel_id));
6028                 },
6029                 _ => panic!("Unexpected event"),
6030         }
6031         match &events[1] {
6032                 &Event::PaymentFailed { ref payment_hash, .. } => {
6033                         assert_eq!(payment_hash_2.clone(), *payment_hash);
6034                 },
6035                 _ => panic!("Unexpected event"),
6036         }
6037
6038         // Complete the first payment and the RAA from the fee update.
6039         let (payment_event, send_raa_event) = {
6040                 let mut msgs = nodes[0].node.get_and_clear_pending_msg_events();
6041                 assert_eq!(msgs.len(), 2);
6042                 (SendEvent::from_event(msgs.remove(0)), msgs.remove(0))
6043         };
6044         let raa = match send_raa_event {
6045                 MessageSendEvent::SendRevokeAndACK { msg, .. } => msg,
6046                 _ => panic!("Unexpected event"),
6047         };
6048         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
6049         check_added_monitors!(nodes[1], 1);
6050         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6051         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6052         let events = nodes[1].node.get_and_clear_pending_events();
6053         assert_eq!(events.len(), 1);
6054         match events[0] {
6055                 Event::PendingHTLCsForwardable { .. } => {},
6056                 _ => panic!("Unexpected event"),
6057         }
6058         nodes[1].node.process_pending_htlc_forwards();
6059         let events = nodes[1].node.get_and_clear_pending_events();
6060         assert_eq!(events.len(), 1);
6061         match events[0] {
6062                 Event::PaymentClaimable { .. } => {},
6063                 _ => panic!("Unexpected event"),
6064         }
6065         nodes[1].node.claim_funds(payment_preimage_1);
6066         check_added_monitors!(nodes[1], 1);
6067         expect_payment_claimed!(nodes[1], payment_hash_1, amt_1);
6068
6069         let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6070         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]);
6071         commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true);
6072         expect_payment_sent!(nodes[0], payment_preimage_1);
6073 }
6074
6075 // Test that if we fail to forward an HTLC that is being freed from the holding cell that the
6076 // HTLC is failed backwards. We trigger this failure to forward the freed HTLC by increasing
6077 // our fee while the HTLC is in the holding cell such that the HTLC is no longer affordable
6078 // once it's freed.
6079 #[test]
6080 fn test_fail_holding_cell_htlc_upon_free_multihop() {
6081         let chanmon_cfgs = create_chanmon_cfgs(3);
6082         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6083         // Avoid having to include routing fees in calculations
6084         let mut config = test_default_channel_config();
6085         config.channel_config.forwarding_fee_base_msat = 0;
6086         config.channel_config.forwarding_fee_proportional_millionths = 0;
6087         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
6088         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6089         let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6090         let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);
6091
6092         // First nodes[1] generates an update_fee, setting the channel's
6093         // pending_update_fee.
6094         {
6095                 let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
6096                 *feerate_lock += 20;
6097         }
6098         nodes[1].node.timer_tick_occurred();
6099         check_added_monitors!(nodes[1], 1);
6100
6101         let events = nodes[1].node.get_and_clear_pending_msg_events();
6102         assert_eq!(events.len(), 1);
6103         let (update_msg, commitment_signed) = match events[0] {
6104                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
6105                         (update_fee.as_ref(), commitment_signed)
6106                 },
6107                 _ => panic!("Unexpected event"),
6108         };
6109
6110         nodes[2].node.handle_update_fee(&nodes[1].node.get_our_node_id(), update_msg.unwrap());
6111
6112         let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan_0_1.2);
6113         let channel_reserve = chan_stat.channel_reserve_msat;
6114         let feerate = get_feerate!(nodes[0], nodes[1], chan_0_1.2);
6115         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_0_1.2);
6116
6117         // Send a payment which passes reserve checks but gets stuck in the holding cell.
6118         let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
6119         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send);
6120         let payment_event = {
6121                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6122                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6123                 check_added_monitors!(nodes[0], 1);
6124
6125                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6126                 assert_eq!(events.len(), 1);
6127
6128                 SendEvent::from_event(events.remove(0))
6129         };
6130         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6131         check_added_monitors!(nodes[1], 0);
6132         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6133         expect_pending_htlcs_forwardable!(nodes[1]);
6134
6135         chan_stat = get_channel_value_stat!(nodes[1], nodes[2], chan_1_2.2);
6136         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
6137
6138         // Flush the pending fee update.
6139         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
6140         let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
6141         check_added_monitors!(nodes[2], 1);
6142         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &raa);
6143         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &commitment_signed);
6144         check_added_monitors!(nodes[1], 2);
6145
6146         // A final RAA message is generated to finalize the fee update.
6147         let events = nodes[1].node.get_and_clear_pending_msg_events();
6148         assert_eq!(events.len(), 1);
6149
6150         let raa_msg = match &events[0] {
6151                 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => {
6152                         msg.clone()
6153                 },
6154                 _ => panic!("Unexpected event"),
6155         };
6156
6157         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_msg);
6158         check_added_monitors!(nodes[2], 1);
6159         assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
6160
6161         // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
6162         let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
6163         assert_eq!(process_htlc_forwards_event.len(), 2);
6164         match &process_htlc_forwards_event[0] {
6165                 &Event::PendingHTLCsForwardable { .. } => {},
6166                 _ => panic!("Unexpected event"),
6167         }
6168
6169         // In response, we call ChannelManager's process_pending_htlc_forwards
6170         nodes[1].node.process_pending_htlc_forwards();
6171         check_added_monitors!(nodes[1], 1);
6172
6173         // This causes the HTLC to be failed backwards.
6174         let fail_event = nodes[1].node.get_and_clear_pending_msg_events();
6175         assert_eq!(fail_event.len(), 1);
6176         let (fail_msg, commitment_signed) = match &fail_event[0] {
6177                 &MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
6178                         assert_eq!(updates.update_add_htlcs.len(), 0);
6179                         assert_eq!(updates.update_fulfill_htlcs.len(), 0);
6180                         assert_eq!(updates.update_fail_malformed_htlcs.len(), 0);
6181                         assert_eq!(updates.update_fail_htlcs.len(), 1);
6182                         (updates.update_fail_htlcs[0].clone(), updates.commitment_signed.clone())
6183                 },
6184                 _ => panic!("Unexpected event"),
6185         };
6186
6187         // Pass the failure messages back to nodes[0].
6188         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
6189         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
6190
6191         // Complete the HTLC failure+removal process.
6192         let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6193         check_added_monitors!(nodes[0], 1);
6194         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
6195         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
6196         check_added_monitors!(nodes[1], 2);
6197         let final_raa_event = nodes[1].node.get_and_clear_pending_msg_events();
6198         assert_eq!(final_raa_event.len(), 1);
6199         let raa = match &final_raa_event[0] {
6200                 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => msg.clone(),
6201                 _ => panic!("Unexpected event"),
6202         };
6203         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
6204         expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_1_2.0.contents.short_channel_id, false);
6205         check_added_monitors!(nodes[0], 1);
6206 }
6207
6208 // BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message.
6209 // BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve.
6210 //TODO: This does not appear to be explicitly enforced when sending an HTLC, but as the fee aspect of the BOLT specs is in flux, we leave it as a TODO.
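// As a rough, non-normative sketch of the budget such a check would enforce (this helper
// is purely illustrative and not exercised by any test): the sender may offer at most its
// balance, less the counterparty-required reserve, less the commitment-tx fee with the
// same fee spike buffer used by the tests below.
#[allow(dead_code)]
fn sketch_sender_budget_msat(balance_msat: u64, channel_reserve_msat: u64, feerate: u32,
        channel_type_features: &ChannelTypeFeatures) -> u64 {
        // The 2* and 1 + 1 mirror the fee spike buffer convention used elsewhere in this file.
        let commit_tx_fee = 2 * commit_tx_fee_msat(feerate, 1 + 1, channel_type_features);
        balance_msat.saturating_sub(channel_reserve_msat).saturating_sub(commit_tx_fee)
}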
6211
6212 #[test]
6213 fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
6214         //BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these)
6215         let chanmon_cfgs = create_chanmon_cfgs(2);
6216         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6217         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6218         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6219         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6220
6221         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6222         route.paths[0].hops[0].fee_msat = 100;
6223
6224         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6225                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6226                 ), true, APIError::ChannelUnavailable { .. }, {});
6227         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6228 }
6229
6230 #[test]
6231 fn test_update_add_htlc_bolt2_sender_zero_value_msat() {
6232         //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6233         let chanmon_cfgs = create_chanmon_cfgs(2);
6234         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6235         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6236         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6237         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6238
6239         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6240         route.paths[0].hops[0].fee_msat = 0;
6241         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6242                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)),
6243                 true, APIError::ChannelUnavailable { ref err },
6244                 assert_eq!(err, "Cannot send 0-msat HTLC"));
6245
6246         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6247         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 1);
6248 }
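// A minimal sketch of the sender-side guard the two tests above exercise; the function and
// parameter names are illustrative, assuming the counterparty's htlc_minimum_msat has
// already been read out of the channel state.
#[allow(dead_code)]
fn sketch_sender_amount_check(amount_msat: u64, counterparty_htlc_minimum_msat: u64) -> Result<(), ChannelError> {
        if amount_msat == 0 {
                // Matches the log line asserted on above.
                return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
        }
        if amount_msat < counterparty_htlc_minimum_msat {
                return Err(ChannelError::Ignore("HTLC amount below the counterparty's htlc_minimum_msat".to_owned()));
        }
        Ok(())
}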
6249
6250 #[test]
6251 fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
6252         //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6253         let chanmon_cfgs = create_chanmon_cfgs(2);
6254         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6255         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6256         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6257         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6258
6259         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6260         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6261                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6262         check_added_monitors!(nodes[0], 1);
6263         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6264         updates.update_add_htlcs[0].amount_msat = 0;
6265
6266         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6267         nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
6268         check_closed_broadcast!(nodes[1], true).unwrap();
6269         check_added_monitors!(nodes[1], 1);
6270         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() },
6271                 [nodes[0].node.get_our_node_id()], 100000);
6272 }
6273
6274 #[test]
6275 fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
6276         //BOLT 2 Requirement: MUST set cltv_expiry less than 500000000.
6277         //It is enforced when constructing a route.
6278         let chanmon_cfgs = create_chanmon_cfgs(2);
6279         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6280         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6281         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6282         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6283
6284         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0)
6285                 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
6286         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000);
6287         route.paths[0].hops.last_mut().unwrap().cltv_expiry_delta = 500000001;
6288         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6289                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6290                 ), true, APIError::InvalidRoute { ref err },
6291                 assert_eq!(err, &"Channel CLTV overflowed?"));
6292 }
6293
6294 #[test]
6295 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() {
6296         //BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC.
6297         //BOLT 2 Requirement: for the first HTLC it offers MUST set id to 0.
6298         //BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer.
6299         let chanmon_cfgs = create_chanmon_cfgs(2);
6300         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6301         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6302         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6303         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
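        // Read the counterparty's max_accepted_htlcs out of the channel context; this means
        // walking the per-peer state map and taking the relevant peer's lock.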
6304         let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
6305                 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().counterparty_max_accepted_htlcs as u64;
6306
6307         // Fetch a route in advance, as we won't be able to fetch one once we're unable to send.
6308         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6309         for i in 0..max_accepted_htlcs {
6310                 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6311                 let payment_event = {
6312                         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6313                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6314                         check_added_monitors!(nodes[0], 1);
6315
6316                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6317                         assert_eq!(events.len(), 1);
6318                         if let MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. }, } = events[0] {
6319                                 assert_eq!(htlcs[0].htlc_id, i);
6320                         } else {
6321                                 panic!("Unexpected event");
6322                         }
6323                         SendEvent::from_event(events.remove(0))
6324                 };
6325                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6326                 check_added_monitors!(nodes[1], 0);
6327                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6328
6329                 expect_pending_htlcs_forwardable!(nodes[1]);
6330                 expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000);
6331         }
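        // One more HTLC (one past max_accepted_htlcs) should now be rejected locally, before
        // any update_add_htlc message is even generated.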
6332         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6333                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6334                 ), true, APIError::ChannelUnavailable { .. }, {});
6335
6336         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6337 }
6338
6339 #[test]
6340 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
6341         //BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC.
6342         let chanmon_cfgs = create_chanmon_cfgs(2);
6343         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6344         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6345         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6346         let channel_value = 100000;
6347         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0);
6348         let max_in_flight = get_channel_value_stat!(nodes[0], nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat;
6349
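        // Sending exactly max_in_flight should succeed (send_payment also claims it, so
        // nothing remains in flight afterwards).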
6350         send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight);
6351
6352         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_in_flight);
6353         // Manually create a route over our max in flight (which our router normally automatically
6354         // limits us to).
6355         route.paths[0].hops[0].fee_msat = max_in_flight + 1;
6356         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6357                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6358                 ), true, APIError::ChannelUnavailable { .. }, {});
6359         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6360
6361         send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
6362 }
6363
6364 // BOLT 2 Requirements for the Receiver when handling an update_add_htlc message.
6365 #[test]
6366 fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
6367         //BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel.
6368         let chanmon_cfgs = create_chanmon_cfgs(2);
6369         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6370         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6371         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6372         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6373         let htlc_minimum_msat: u64;
6374         {
6375                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
6376                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
6377                 let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
6378                 htlc_minimum_msat = channel.context().get_holder_htlc_minimum_msat();
6379         }
6380
6381         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
6382         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6383                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6384         check_added_monitors!(nodes[0], 1);
6385         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6386         updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat - 1;
6387         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6388         assert!(nodes[1].node.list_channels().is_empty());
6389         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6390         assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6391         check_added_monitors!(nodes[1], 1);
6392         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6393 }
6394
6395 #[test]
6396 fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
6397         //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel
6398         let chanmon_cfgs = create_chanmon_cfgs(2);
6399         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6400         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6401         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6402         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6403
6404         let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6405         let channel_reserve = chan_stat.channel_reserve_msat;
6406         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
6407         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
6408         // The 2* and +1 are for the fee spike reserve.
6409         let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
6410
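        // nodes[0] retains 5_000_000 msat of the 100_000 sat channel after pushing 95_000_000 msat at open.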
6411         let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound;
6412         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
6413         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6414                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6415         check_added_monitors!(nodes[0], 1);
6416         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6417
6418         // Even though channel-initiator senders are required to respect the fee_spike_reserve,
6419         // at this time channel-initiatee receivers are not required to enforce that senders
6420         // respect the fee_spike_reserve.
6421         updates.update_add_htlcs[0].amount_msat = max_can_send + commit_tx_fee_outbound + 1;
6422         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6423
6424         assert!(nodes[1].node.list_channels().is_empty());
6425         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6426         assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
6427         check_added_monitors!(nodes[1], 1);
6428         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6429 }
6430
6431 #[test]
6432 fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
6433         //BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel
6434         //BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash.
6435         let chanmon_cfgs = create_chanmon_cfgs(2);
6436         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6437         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6438         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6439         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6440
6441         let send_amt = 3999999;
6442         let (mut route, our_payment_hash, _, our_payment_secret) =
6443                 get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
6444         route.paths[0].hops[0].fee_msat = send_amt;
6445         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
6446         let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
6447         let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap();
6448         let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
6449                 &route.paths[0], send_amt, RecipientOnionFields::secret_only(our_payment_secret), cur_height, &None).unwrap();
6450         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
6451
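        // Build the update_add_htlc message by hand so we can replay it more times than our
        // own sender-side checks would ever allow.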
6452         let mut msg = msgs::UpdateAddHTLC {
6453                 channel_id: chan.2,
6454                 htlc_id: 0,
6455                 amount_msat: 1000,
6456                 payment_hash: our_payment_hash,
6457                 cltv_expiry: htlc_cltv,
6458                 onion_routing_packet: onion_packet.clone(),
6459                 skimmed_fee_msat: None,
6460         };
6461
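        // Deliver HTLCs up to the receiver's max_accepted_htlcs (50 by default at the time of
        // writing); the 51st below should trip the check.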
6462         for i in 0..50 {
6463                 msg.htlc_id = i as u64;
6464                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6465         }
6466         msg.htlc_id = 50;
6467         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6468
6469         assert!(nodes[1].node.list_channels().is_empty());
6470         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6471         assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6472         check_added_monitors!(nodes[1], 1);
6473         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6474 }
6475
6476 #[test]
6477 fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
6478         //BOLT 2 Requirement: if a sending node adds more than its max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction: SHOULD fail the channel
6479         let chanmon_cfgs = create_chanmon_cfgs(2);
6480         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6481         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6482         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6483         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6484
6485         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6486         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6487                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6488         check_added_monitors!(nodes[0], 1);
6489         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6490         updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat + 1;
6491         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6492
6493         assert!(nodes[1].node.list_channels().is_empty());
6494         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6495         assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
6496         check_added_monitors!(nodes[1], 1);
6497         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 1000000);
6498 }
6499
6500 #[test]
6501 fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
6502         //BOLT2 Requirement: if sending node sets cltv_expiry to greater or equal to 500000000: SHOULD fail the channel.
6503         let chanmon_cfgs = create_chanmon_cfgs(2);
6504         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6505         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6506         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6507
6508         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6509         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6510         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6511                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6512         check_added_monitors!(nodes[0], 1);
6513         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6514         updates.update_add_htlcs[0].cltv_expiry = 500000000;
6515         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6516
6517         assert!(nodes[1].node.list_channels().is_empty());
6518         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6519         assert_eq!(err_msg.data, "Remote provided CLTV expiry in seconds instead of block height");
6520         check_added_monitors!(nodes[1], 1);
6521         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6522 }
6523
6524 #[test]
6525 fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
6526         //BOLT 2 Requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection.
6527         // We test this by first checking that repeated HTLCs pass commitment signature checks
6528         // after disconnect, and that non-sequential htlc_ids result in a channel failure.
6529         let chanmon_cfgs = create_chanmon_cfgs(2);
6530         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6531         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6532         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6533
6534         create_announced_chan_between_nodes(&nodes, 0, 1);
6535         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6536         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6537                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6538         check_added_monitors!(nodes[0], 1);
6539         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6540         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6541
6542         //Disconnect and Reconnect
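        // Drop the connection from both sides, then run the standard channel_reestablish
        // handshake. The HTLC above was never irrevocably committed, so nodes[0] is expected
        // to retransmit it.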
6543         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
6544         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
6545         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
6546                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
6547         }, true).unwrap();
6548         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
6549         assert_eq!(reestablish_1.len(), 1);
6550         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
6551                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
6552         }, false).unwrap();
6553         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
6554         assert_eq!(reestablish_2.len(), 1);
6555         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
6556         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
6557         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
6558         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
6559
6560         //Resend HTLC
6561         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6562         assert_eq!(updates.commitment_signed.htlc_signatures.len(), 1);
6563         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed);
6564         check_added_monitors!(nodes[1], 1);
6565         let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6566
6567         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6568
6569         assert!(nodes[1].node.list_channels().is_empty());
6570         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6571         assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
6572         check_added_monitors!(nodes[1], 1);
6573         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6574 }
6575
6576 #[test]
6577 fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
6578         //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6579
6580         let chanmon_cfgs = create_chanmon_cfgs(2);
6581         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6582         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6583         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6584         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6585         let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6586         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6587                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6588
6589         check_added_monitors!(nodes[0], 1);
6590         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6591         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6592
6593         let update_msg = msgs::UpdateFulfillHTLC{
6594                 channel_id: chan.2,
6595                 htlc_id: 0,
6596                 payment_preimage: our_payment_preimage,
6597         };
6598
6599         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6600
6601         assert!(nodes[0].node.list_channels().is_empty());
6602         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6603         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6604         check_added_monitors!(nodes[0], 1);
6605         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6606 }
6607
6608 #[test]
6609 fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
6610         //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6611
6612         let chanmon_cfgs = create_chanmon_cfgs(2);
6613         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6614         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6615         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6616         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6617
6618         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6619         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6620                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6621         check_added_monitors!(nodes[0], 1);
6622         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6623         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6624
6625         let update_msg = msgs::UpdateFailHTLC{
6626                 channel_id: chan.2,
6627                 htlc_id: 0,
6628                 reason: msgs::OnionErrorPacket { data: Vec::new() },
6629         };
6630
6631         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6632
6633         assert!(nodes[0].node.list_channels().is_empty());
6634         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6635         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6636         check_added_monitors!(nodes[0], 1);
6637         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6638 }
6639
6640 #[test]
6641 fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() {
6642         //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6643
6644         let chanmon_cfgs = create_chanmon_cfgs(2);
6645         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6646         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6647         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6648         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6649
6650         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6651         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6652                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6653         check_added_monitors!(nodes[0], 1);
6654         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6655         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6656         let update_msg = msgs::UpdateFailMalformedHTLC{
6657                 channel_id: chan.2,
6658                 htlc_id: 0,
6659                 sha256_of_onion: [1; 32],
6660                 failure_code: 0x8000,
6661         };
6662
6663         nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6664
6665         assert!(nodes[0].node.list_channels().is_empty());
6666         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6667         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6668         check_added_monitors!(nodes[0], 1);
6669         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6670 }
6671
6672 #[test]
6673 fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
6674         //BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel.
6675
6676         let chanmon_cfgs = create_chanmon_cfgs(2);
6677         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6678         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6679         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6680         create_announced_chan_between_nodes(&nodes, 0, 1);
6681
6682         let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
6683
6684         nodes[1].node.claim_funds(our_payment_preimage);
6685         check_added_monitors!(nodes[1], 1);
6686         expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);
6687
6688         let events = nodes[1].node.get_and_clear_pending_msg_events();
6689         assert_eq!(events.len(), 1);
6690         let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
6691                 match events[0] {
6692                         MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6693                                 assert!(update_add_htlcs.is_empty());
6694                                 assert_eq!(update_fulfill_htlcs.len(), 1);
6695                                 assert!(update_fail_htlcs.is_empty());
6696                                 assert!(update_fail_malformed_htlcs.is_empty());
6697                                 assert!(update_fee.is_none());
6698                                 update_fulfill_htlcs[0].clone()
6699                         },
6700                         _ => panic!("Unexpected event"),
6701                 }
6702         };
6703
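        // Only one HTLC (id 0) exists on this channel, so fulfilling id 1 references an HTLC
        // nodes[0] can't find.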
6704         update_fulfill_msg.htlc_id = 1;
6705
6706         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
6707
6708         assert!(nodes[0].node.list_channels().is_empty());
6709         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6710         assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
6711         check_added_monitors!(nodes[0], 1);
6712         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6713 }
6714
6715 #[test]
6716 fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
6717         //BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel.
6718
6719         let chanmon_cfgs = create_chanmon_cfgs(2);
6720         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6721         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6722         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6723         create_announced_chan_between_nodes(&nodes, 0, 1);
6724
6725         let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
6726
6727         nodes[1].node.claim_funds(our_payment_preimage);
6728         check_added_monitors!(nodes[1], 1);
6729         expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);
6730
6731         let events = nodes[1].node.get_and_clear_pending_msg_events();
6732         assert_eq!(events.len(), 1);
6733         let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
6734                 match events[0] {
6735                         MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6736                                 assert!(update_add_htlcs.is_empty());
6737                                 assert_eq!(update_fulfill_htlcs.len(), 1);
6738                                 assert!(update_fail_htlcs.is_empty());
6739                                 assert!(update_fail_malformed_htlcs.is_empty());
6740                                 assert!(update_fee.is_none());
6741                                 update_fulfill_htlcs[0].clone()
6742                         },
6743                         _ => panic!("Unexpected event"),
6744                 }
6745         };
6746
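        // Swap in a bogus preimage which no longer hashes to our_payment_hash.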
6747         update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]);
6748
6749         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
6750
6751         assert!(nodes[0].node.list_channels().is_empty());
6752         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6753         assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
6754         check_added_monitors!(nodes[0], 1);
6755         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6756 }
6757
6758 #[test]
6759 fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() {
6760         //BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel.
6761
6762         let chanmon_cfgs = create_chanmon_cfgs(2);
6763         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6764         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6765         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6766         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6767
6768         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6769         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6770                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6771         check_added_monitors!(nodes[0], 1);
6772
6773         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6774         updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
6775
6776         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6777         check_added_monitors!(nodes[1], 0);
6778         commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true);
6779
6780         let events = nodes[1].node.get_and_clear_pending_msg_events();
6781
6782         let mut update_msg: msgs::UpdateFailMalformedHTLC = {
6783                 match events[0] {
6784                         MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6785                                 assert!(update_add_htlcs.is_empty());
6786                                 assert!(update_fulfill_htlcs.is_empty());
6787                                 assert!(update_fail_htlcs.is_empty());
6788                                 assert_eq!(update_fail_malformed_htlcs.len(), 1);
6789                                 assert!(update_fee.is_none());
6790                                 update_fail_malformed_htlcs[0].clone()
6791                         },
6792                         _ => panic!("Unexpected event"),
6793                 }
6794         };
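        // Clear the BADONION bit (0x8000), producing a failure code that is invalid for an
        // update_fail_malformed_htlc message.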
6795         update_msg.failure_code &= !0x8000;
6796         nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6797
6798         assert!(nodes[0].node.list_channels().is_empty());
6799         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6800         assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
6801         check_added_monitors!(nodes[0], 1);
6802         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 1000000);
6803 }
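// A minimal sketch of the receiver-side rule exercised above (illustrative only, not the
// actual channel code): update_fail_malformed_htlc must carry the BADONION bit (0x8000)
// in its failure_code.
#[allow(dead_code)]
fn sketch_check_malformed_failure_code(failure_code: u16) -> Result<(), ChannelError> {
        if failure_code & 0x8000 == 0 {
                // Matches the closure error asserted on above.
                return Err(ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned()));
        }
        Ok(())
}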
6804
6805 #[test]
6806 fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_update_fail_htlc() {
6807         //BOLT 2 Requirement: a receiving node which has an outgoing HTLC canceled by update_fail_malformed_htlc:
6808         //    * MUST return an error in the update_fail_htlc sent to the link which originally sent the HTLC, using the failure_code given and setting the data to sha256_of_onion.
6809
6810         let chanmon_cfgs = create_chanmon_cfgs(3);
6811         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6812         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
6813         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6814         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6815         let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000);
6816
6817         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
6818
6819         //First hop
6820         let mut payment_event = {
6821                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6822                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6823                 check_added_monitors!(nodes[0], 1);
6824                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6825                 assert_eq!(events.len(), 1);
6826                 SendEvent::from_event(events.remove(0))
6827         };
6828         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6829         check_added_monitors!(nodes[1], 0);
6830         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6831         expect_pending_htlcs_forwardable!(nodes[1]);
6832         let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
6833         assert_eq!(events_2.len(), 1);
6834         check_added_monitors!(nodes[1], 1);
6835         payment_event = SendEvent::from_event(events_2.remove(0));
6836         assert_eq!(payment_event.msgs.len(), 1);
6837
6838         //Second Hop
6839         payment_event.msgs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
6840         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
6841         check_added_monitors!(nodes[2], 0);
6842         commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
6843
6844         let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
6845         assert_eq!(events_3.len(), 1);
6846         let update_msg: (msgs::UpdateFailMalformedHTLC, msgs::CommitmentSigned) = {
6847                 match events_3[0] {
6848                         MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
6849                                 assert!(update_add_htlcs.is_empty());
6850                                 assert!(update_fulfill_htlcs.is_empty());
6851                                 assert!(update_fail_htlcs.is_empty());
6852                                 assert_eq!(update_fail_malformed_htlcs.len(), 1);
6853                                 assert!(update_fee.is_none());
6854                                 (update_fail_malformed_htlcs[0].clone(), commitment_signed.clone())
6855                         },
6856                         _ => panic!("Unexpected event"),
6857                 }
6858         };
6859
6860         nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg.0);
6861
6862         check_added_monitors!(nodes[1], 0);
6863         commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
6864         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
6865         let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
6866         assert_eq!(events_4.len(), 1);
6867
6868         //Confirm that handling the update_fail_malformed_htlc message produces an update_fail_htlc message to be forwarded back along the route
6869         match events_4[0] {
6870                 MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6871                         assert!(update_add_htlcs.is_empty());
6872                         assert!(update_fulfill_htlcs.is_empty());
6873                         assert_eq!(update_fail_htlcs.len(), 1);
6874                         assert!(update_fail_malformed_htlcs.is_empty());
6875                         assert!(update_fee.is_none());
6876                 },
6877                 _ => panic!("Unexpected event"),
6878         };
6879
6880         check_added_monitors!(nodes[1], 1);
6881 }
6882
6883 #[test]
6884 fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() {
6885         let chanmon_cfgs = create_chanmon_cfgs(3);
6886         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6887         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
6888         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6889         create_announced_chan_between_nodes(&nodes, 0, 1);
6890         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
6891
6892         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100_000);
6893
6894         // First hop
6895         let mut payment_event = {
6896                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6897                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6898                 check_added_monitors!(nodes[0], 1);
6899                 SendEvent::from_node(&nodes[0])
6900         };
6901
6902         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6903         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6904         expect_pending_htlcs_forwardable!(nodes[1]);
6905         check_added_monitors!(nodes[1], 1);
6906         payment_event = SendEvent::from_node(&nodes[1]);
6907         assert_eq!(payment_event.msgs.len(), 1);
6908
6909         // Second Hop
6910         payment_event.msgs[0].onion_routing_packet.version = 1; // Trigger an invalid_onion_version error
6911         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
6912         check_added_monitors!(nodes[2], 0);
6913         commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
6914
6915         let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
6916         assert_eq!(events_3.len(), 1);
6917         match events_3[0] {
6918                 MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
6919                         let mut update_msg = updates.update_fail_malformed_htlcs[0].clone();
6920                         // Set the NODE bit (BADONION and PERM already set in invalid_onion_version error)
6921                         update_msg.failure_code |= 0x2000;
6922
6923                         nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg);
6924                         commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true);
6925                 },
6926                 _ => panic!("Unexpected event"),
6927         }
6928
6929         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
6930                 vec![HTLCDestination::NextHopChannel {
6931                         node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
6932         let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
6933         assert_eq!(events_4.len(), 1);
6934         check_added_monitors!(nodes[1], 1);
6935
6936         match events_4[0] {
6937                 MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
6938                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
6939                         commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
6940                 },
6941                 _ => panic!("Unexpected event"),
6942         }
6943
6944         let events_5 = nodes[0].node.get_and_clear_pending_events();
6945         assert_eq!(events_5.len(), 2);
6946
6947         // Expect a PaymentPathFailed event with a ChannelFailure network update for the channel between
6948         // the node originating the error to its next hop.
6949         match events_5[0] {
6950                 Event::PaymentPathFailed { error_code, failure: PathFailure::OnPath { network_update: Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent }) }, ..
6951                 } => {
6952                         assert_eq!(short_channel_id, chan_2.0.contents.short_channel_id);
6953                         assert!(is_permanent);
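                        // BADONION|PERM|4 is invalid_onion_version; the NODE bit (0x2000) is the one we set above.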
6954                         assert_eq!(error_code, Some(0x8000|0x4000|0x2000|4));
6955                 },
6956                 _ => panic!("Unexpected event"),
6957         }
6958         match events_5[1] {
6959                 Event::PaymentFailed { payment_hash, .. } => {
6960                         assert_eq!(payment_hash, our_payment_hash);
6961                 },
6962                 _ => panic!("Unexpected event"),
6963         }
6964
6965         // TODO: Test actual removal of channel from NetworkGraph when it's implemented.
6966 }
6967
6968 fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
6969         // Dust-HTLC failure updates must be delayed until the failure-trigger tx (in this case the local commitment tx) reaches ANTI_REORG_DELAY confirmations.
6970         // We can have at most two valid local commitment txs, so both cases must be covered and both txs checked to catch every HTLC:
6971         // an HTLC may have been removed from the latest local commitment tx yet remain valid until we receive the remote's RAA.
6972
6973         let mut chanmon_cfgs = create_chanmon_cfgs(2);
6974         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
6975         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6976         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6977         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6978         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6979
6980         let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
6981                 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
6982
6983         // We route 2 dust-HTLCs between A and B
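        // HTLCs at or below the commitment tx's dust limit get no output on it, so there is no
        // HTLC-timeout claim path; their failure can only be keyed off the commitment tx itself.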
6984         let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
6985         let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
6986         route_payment(&nodes[0], &[&nodes[1]], 1000000);
6987
6988         // Cache one local commitment tx as previous
6989         let as_prev_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
6990
6991         // Fail one HTLC to prune it in the will-be-latest-local commitment tx
6992         nodes[1].node.fail_htlc_backwards(&payment_hash_2);
6993         check_added_monitors!(nodes[1], 0);
6994         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
6995         check_added_monitors!(nodes[1], 1);
6996
6997         let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6998         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &remove.update_fail_htlcs[0]);
6999         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &remove.commitment_signed);
7000         check_added_monitors!(nodes[0], 1);
7001
7002         // Cache one local commitment tx as latest
7003         let as_last_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7004
7005         let events = nodes[0].node.get_and_clear_pending_msg_events();
7006         match events[0] {
7007                 MessageSendEvent::SendRevokeAndACK { node_id, .. } => {
7008                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
7009                 },
7010                 _ => panic!("Unexpected event"),
7011         }
7012         match events[1] {
7013                 MessageSendEvent::UpdateHTLCs { node_id, .. } => {
7014                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
7015                 },
7016                 _ => panic!("Unexpected event"),
7017         }
7018
7019         assert_ne!(as_prev_commitment_tx, as_last_commitment_tx);
7020         // Fail the 2 dust-HTLCs, moving their failure into the maturation buffer (htlc_updated_waiting_threshold_conf)
7021         if announce_latest {
7022                 mine_transaction(&nodes[0], &as_last_commitment_tx[0]);
7023         } else {
7024                 mine_transaction(&nodes[0], &as_prev_commitment_tx[0]);
7025         }
7026
7027         check_closed_broadcast!(nodes[0], true);
7028         check_added_monitors!(nodes[0], 1);
7029         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
7030
7031         assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7032         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7033         let events = nodes[0].node.get_and_clear_pending_events();
7034         // Only the 2 dust-HTLCs should produce failure events here (each a PaymentPathFailed plus a PaymentFailed); the over-dust HTLC has to be failed by its timeout tx
7035         assert_eq!(events.len(), 4);
7036         let mut first_failed = false;
7037         for event in events {
7038                 match event {
7039                         Event::PaymentPathFailed { payment_hash, .. } => {
7040                                 if payment_hash == payment_hash_1 {
7041                                         assert!(!first_failed);
7042                                         first_failed = true;
7043                                 } else {
7044                                         assert_eq!(payment_hash, payment_hash_2);
7045                                 }
7046                         },
7047                         Event::PaymentFailed { .. } => {}
7048                         _ => panic!("Unexpected event"),
7049                 }
7050         }
7051 }
7052
7053 #[test]
7054 fn test_failure_delay_dust_htlc_local_commitment() {
7055         do_test_failure_delay_dust_htlc_local_commitment(true);
7056         do_test_failure_delay_dust_htlc_local_commitment(false);
7057 }
7058
7059 fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
7060         // Outbound HTLC-failure updates must be cancelled if we get a reorg before we reach ANTI_REORG_DELAY.
7061         // Broadcast of revoked remote commitment tx, trigger failure-update of dust/non-dust HTLCs
7062         // Broadcast of remote commitment tx, trigger failure-update of dust-HTLCs
7063         // Broadcast of timeout tx on remote commitment tx, trigger failure-update of non-dust HTLCs
7064         // Broadcast of local commitment tx, trigger failure-update of dust-HTLCs
7065         // Broadcast of HTLC-timeout tx on local commitment tx, trigger failure-update of non-dust HTLCs
7066
7067         let chanmon_cfgs = create_chanmon_cfgs(3);
7068         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
7069         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
7070         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
7071         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
7072
7073         let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
7074                 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
7075
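	// Route one HTLC at the counterparty's dust limit (trimmed, so it has no commitment tx output)
	// and one well above it, exercising both failure paths below.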
7076         let (_payment_preimage_1, dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7077         let (_payment_preimage_2, non_dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
7078
7079         let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7080         let bs_commitment_tx = get_local_commitment_txn!(nodes[1], chan.2);
7081
7082	// If `revoked`, revoke bs_commitment_tx by routing and claiming one more payment
7083         if revoked {
7084                 let (payment_preimage_3, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
7085                 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
7086         }
7087
7088         let mut timeout_tx = Vec::new();
7089         if local {
7090                 // We fail dust-HTLC 1 by broadcast of local commitment tx
7091                 mine_transaction(&nodes[0], &as_commitment_tx[0]);
7092                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
7093                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7094                 expect_payment_failed!(nodes[0], dust_hash, false);
7095
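		// Connect enough blocks for the non-dust HTLC to expire plus the latency grace period, at
		// which point the HTLC-timeout tx should be broadcast.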
7096                 connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - ANTI_REORG_DELAY);
7097                 check_closed_broadcast!(nodes[0], true);
7098                 check_added_monitors!(nodes[0], 1);
7099                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7100                 timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
7101                 assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
7102                 // We fail non-dust-HTLC 2 by broadcast of local HTLC-timeout tx on local commitment tx
7103                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7104                 mine_transaction(&nodes[0], &timeout_tx[0]);
7105                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7106                 expect_payment_failed!(nodes[0], non_dust_hash, false);
7107         } else {
7108		// We fail dust-HTLC 1 by broadcast of the remote commitment tx. If revoked, the non-dust HTLC is failed as well
7109                 mine_transaction(&nodes[0], &bs_commitment_tx[0]);
7110                 check_closed_broadcast!(nodes[0], true);
7111                 check_added_monitors!(nodes[0], 1);
7112                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
7113                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7114
7115                 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
7116                 timeout_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().drain(..)
7117                         .filter(|tx| tx.input[0].previous_output.txid == bs_commitment_tx[0].txid()).collect();
7118                 check_spends!(timeout_tx[0], bs_commitment_tx[0]);
7119		// For both revoked and non-revoked commitment transactions, the dust HTLC should have
7120		// been failed after ANTI_REORG_DELAY.
7121                 expect_payment_failed!(nodes[0], dust_hash, false);
7122
7123                 if !revoked {
7124                         assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
7125                 } else {
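			// In the revoked case the sweep is a justice claim rather than an HTLC-timeout;
			// sanity-check its locktime (11, given the blocks connected in this test).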
7126                         assert_eq!(timeout_tx[0].lock_time.to_consensus_u32(), 11);
7127                 }
7128                 // We fail non-dust-HTLC 2 by broadcast of local timeout/revocation-claim tx
7129                 mine_transaction(&nodes[0], &timeout_tx[0]);
7130                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7131                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7132                 expect_payment_failed!(nodes[0], non_dust_hash, false);
7133         }
7134 }
7135
7136 #[test]
7137 fn test_sweep_outbound_htlc_failure_update() {
7138         do_test_sweep_outbound_htlc_failure_update(false, true);
7139         do_test_sweep_outbound_htlc_failure_update(false, false);
7140         do_test_sweep_outbound_htlc_failure_update(true, false);
7141 }
7142
7143 #[test]
7144 fn test_user_configurable_csv_delay() {
7145	// We test that our channel constructors yield errors when we pass them an absurd CSV delay
7146
7147         let mut low_our_to_self_config = UserConfig::default();
7148         low_our_to_self_config.channel_handshake_config.our_to_self_delay = 6;
7149         let mut high_their_to_self_config = UserConfig::default();
7150         high_their_to_self_config.channel_handshake_limits.their_to_self_delay = 100;
7151         let user_cfgs = [Some(high_their_to_self_config.clone()), None];
7152         let chanmon_cfgs = create_chanmon_cfgs(2);
7153         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7154         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
7155         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7156
7157         // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new()
7158         if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7159                 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0,
7160                 &low_our_to_self_config, 0, 42, None)
7161         {
7162                 match error {
7163                         APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
7164                         _ => panic!("Unexpected event"),
7165                 }
7166         } else { assert!(false) }
7167
7168         // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
7169         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
7170         let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
7171         open_channel.to_self_delay = 200;
7172         if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7173                 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
7174                 &low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
7175         {
7176                 match error {
7177                         ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str()));  },
7178                         _ => panic!("Unexpected event"),
7179                 }
7180         } else { assert!(false); }
7181
7182	// We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::accept_channel()
7183         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
7184         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
7185         let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
7186         accept_channel.to_self_delay = 200;
7187         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
7188         let reason_msg;
7189         if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
7190                 match action {
7191                         &ErrorAction::SendErrorMessage { ref msg } => {
7192                                 assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str()));
7193                                 reason_msg = msg.data.clone();
7194                         },
7195                         _ => { panic!(); }
7196                 }
7197         } else { panic!(); }
7198         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }, [nodes[1].node.get_our_node_id()], 1000000);
7199
7200         // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
7201         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
7202         let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
7203         open_channel.to_self_delay = 200;
7204         if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7205                 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
7206                 &high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
7207         {
7208                 match error {
7209                         ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); },
7210                         _ => panic!("Unexpected event"),
7211                 }
7212         } else { assert!(false); }
7213 }
7214
7215 #[test]
7216 fn test_check_htlc_underpaying() {
7217	// Send a payment through A -> B, but have A maliciously
7218	// send a probe payment (i.e. less than the expected value)
7219	// to B; B should refuse the payment.
7220
7221         let chanmon_cfgs = create_chanmon_cfgs(2);
7222         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7223         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7224         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7225
7226         // Create some initial channels
7227         create_announced_chan_between_nodes(&nodes, 0, 1);
7228
7229         let scorer = test_utils::TestScorer::new();
7230         let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7231         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
7232                 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
7233         let route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000);
7234         let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(),
7235                 None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
7236         let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]);
7237         let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None).unwrap();
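	// The recipient registered this payment for 100_000 msat, but the route above only delivers
	// 10_000 msat, so nodes[1] must fail the HTLC back.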
7238         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
7239                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
7240         check_added_monitors!(nodes[0], 1);
7241
7242         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
7243         assert_eq!(events.len(), 1);
7244         let mut payment_event = SendEvent::from_event(events.pop().unwrap());
7245         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
7246         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
7247
7248         // Note that we first have to wait a random delay before processing the receipt of the HTLC,
7249         // and then will wait a second random delay before failing the HTLC back:
7250         expect_pending_htlcs_forwardable!(nodes[1]);
7251         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
7252
7253	// nodes[1] is expecting a payment of 100_000 msat but received 10_000,
7254	// so it should fail the HTLC as if we didn't know the preimage.
7255         nodes[1].node.process_pending_htlc_forwards();
7256
7257         let events = nodes[1].node.get_and_clear_pending_msg_events();
7258         assert_eq!(events.len(), 1);
7259         let (update_fail_htlc, commitment_signed) = match events[0] {
7260                 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
7261                         assert!(update_add_htlcs.is_empty());
7262                         assert!(update_fulfill_htlcs.is_empty());
7263                         assert_eq!(update_fail_htlcs.len(), 1);
7264                         assert!(update_fail_malformed_htlcs.is_empty());
7265                         assert!(update_fee.is_none());
7266                         (update_fail_htlcs[0].clone(), commitment_signed)
7267                 },
7268                 _ => panic!("Unexpected event"),
7269         };
7270         check_added_monitors!(nodes[1], 1);
7271
7272         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlc);
7273         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
7274
7275         // 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32
7276         let mut expected_failure_data = (10_000 as u64).to_be_bytes().to_vec();
7277         expected_failure_data.extend_from_slice(&CHAN_CONFIRM_DEPTH.to_be_bytes());
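	// 0x4000|15 is PERM|incorrect_or_unknown_payment_details (BOLT 4); its failure data is the
	// incoming htlc_msat followed by the current block height.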
7278         expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000|15, &expected_failure_data[..]);
7279 }
7280
7281 #[test]
7282 fn test_announce_disable_channels() {
7283	// Create 3 channels between A and B. Disconnect B. Call timer_tick_occurred and check for the generated ChannelUpdates disabling the channels.
7284	// Reconnect B, reestablish, and check that new ChannelUpdates re-enabling the channels are generated.
7285
7286         let chanmon_cfgs = create_chanmon_cfgs(2);
7287         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7288         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7289         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7290
7291         create_announced_chan_between_nodes(&nodes, 0, 1);
7292         create_announced_chan_between_nodes(&nodes, 1, 0);
7293         create_announced_chan_between_nodes(&nodes, 0, 1);
7294
7295         // Disconnect peers
7296         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
7297         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
7298
7299         for _ in 0..DISABLE_GOSSIP_TICKS + 1 {
7300                 nodes[0].node.timer_tick_occurred();
7301         }
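	// A disabling ChannelUpdate is only generated once the peer has been disconnected for a full
	// DISABLE_GOSSIP_TICKS ticks, hence the extra tick above.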
7302         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
7303         assert_eq!(msg_events.len(), 3);
7304         let mut chans_disabled = HashMap::new();
7305         for e in msg_events {
7306                 match e {
7307                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
7308                                 assert_eq!(msg.contents.flags & (1<<1), 1<<1); // The "channel disabled" bit should be set
7309                                 // Check that each channel gets updated exactly once
7310                                 if chans_disabled.insert(msg.contents.short_channel_id, msg.contents.timestamp).is_some() {
7311                                         panic!("Generated ChannelUpdate for wrong chan!");
7312                                 }
7313                         },
7314                         _ => panic!("Unexpected event"),
7315                 }
7316         }
7317         // Reconnect peers
7318         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
7319                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
7320         }, true).unwrap();
7321         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
7322         assert_eq!(reestablish_1.len(), 3);
7323         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
7324                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
7325         }, false).unwrap();
7326         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
7327         assert_eq!(reestablish_2.len(), 3);
7328
7329         // Reestablish chan_1
7330         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
7331         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7332         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
7333         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7334         // Reestablish chan_2
7335         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[1]);
7336         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7337         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[1]);
7338         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7339         // Reestablish chan_3
7340         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[2]);
7341         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7342         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[2]);
7343         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7344
7345         for _ in 0..ENABLE_GOSSIP_TICKS {
7346                 nodes[0].node.timer_tick_occurred();
7347         }
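	// Likewise, re-enabling requires the peer to have been connected for a full
	// ENABLE_GOSSIP_TICKS ticks; only the tick after that broadcasts the fresh ChannelUpdates.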
7348         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7349         nodes[0].node.timer_tick_occurred();
7350         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
7351         assert_eq!(msg_events.len(), 3);
7352         for e in msg_events {
7353                 match e {
7354                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
7355                                 assert_eq!(msg.contents.flags & (1<<1), 0); // The "channel disabled" bit should be off
7356                                 match chans_disabled.remove(&msg.contents.short_channel_id) {
7357                                         // Each update should have a higher timestamp than the previous one, replacing
7358                                         // the old one.
7359                                         Some(prev_timestamp) => assert!(msg.contents.timestamp > prev_timestamp),
7360                                         None => panic!("Generated ChannelUpdate for wrong chan!"),
7361                                 }
7362                         },
7363                         _ => panic!("Unexpected event"),
7364                 }
7365         }
7366         // Check that each channel gets updated exactly once
7367         assert!(chans_disabled.is_empty());
7368 }
7369
7370 #[test]
7371 fn test_bump_penalty_txn_on_revoked_commitment() {
7372	// If penalty txn have feerates too low to get into mempools, RBF-bump them to be sure
7373	// we're able to claim the outputs on the revoked commitment transaction before its timelocks expire
7374
7375         let chanmon_cfgs = create_chanmon_cfgs(2);
7376         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7377         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7378         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7379
7380         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7381
7382         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
7383         let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 30)
7384                 .with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap();
7385         let (route,_, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_params, 3000000);
7386         send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000);
7387
7388         let revoked_txn = get_local_commitment_txn!(nodes[0], chan.2);
7389         // Revoked commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
7390         assert_eq!(revoked_txn[0].output.len(), 4);
7391         assert_eq!(revoked_txn[0].input.len(), 1);
7392         assert_eq!(revoked_txn[0].input[0].previous_output.txid, chan.3.txid());
7393         let revoked_txid = revoked_txn[0].txid();
7394
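	// Sum the P2WSH outputs (to_local and both HTLC outputs); the to_remote output is not P2WSH
	// and is not claimable by our justice tx.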
7395         let mut penalty_sum = 0;
7396         for outp in revoked_txn[0].output.iter() {
7397                 if outp.script_pubkey.is_v0_p2wsh() {
7398                         penalty_sum += outp.value;
7399                 }
7400         }
7401
7402	// Connect blocks to change the height_timer range to see if we use the right soonest_timelock
7403         let header_114 = connect_blocks(&nodes[1], 14);
7404
7405         // Actually revoke tx by claiming a HTLC
7406         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
7407         connect_block(&nodes[1], &create_dummy_block(header_114, 42, vec![revoked_txn[0].clone()]));
7408         check_added_monitors!(nodes[1], 1);
7409
7410         // One or more justice tx should have been broadcast, check it
7411         let penalty_1;
7412         let feerate_1;
7413         {
7414                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7415                 assert_eq!(node_txn.len(), 1); // justice tx (broadcasted from ChannelMonitor)
7416                 assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7417                 assert_eq!(node_txn[0].output.len(), 1);
7418                 check_spends!(node_txn[0], revoked_txn[0]);
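		// fee * 1000 / weight gives the feerate in sats per 1000 weight units, the unit LDK's
		// fee estimation uses throughout.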
7419                 let fee_1 = penalty_sum - node_txn[0].output[0].value;
7420                 feerate_1 = fee_1 * 1000 / node_txn[0].weight().to_wu();
7421                 penalty_1 = node_txn[0].txid();
7422                 node_txn.clear();
7423         };
7424
7425         // After exhaustion of height timer, a new bumped justice tx should have been broadcast, check it
7426         connect_blocks(&nodes[1], 15);
7427         let mut penalty_2 = penalty_1;
7428         let mut feerate_2 = 0;
7429         {
7430                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7431                 assert_eq!(node_txn.len(), 1);
7432                 if node_txn[0].input[0].previous_output.txid == revoked_txid {
7433                         assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7434                         assert_eq!(node_txn[0].output.len(), 1);
7435                         check_spends!(node_txn[0], revoked_txn[0]);
7436                         penalty_2 = node_txn[0].txid();
7437			// Verify the new bumped tx is different from the last claiming transaction, we don't want spurious rebroadcast
7438                         assert_ne!(penalty_2, penalty_1);
7439                         let fee_2 = penalty_sum - node_txn[0].output[0].value;
7440                         feerate_2 = fee_2 * 1000 / node_txn[0].weight().to_wu();
7441                         // Verify 25% bump heuristic
7442                         assert!(feerate_2 * 100 >= feerate_1 * 125);
7443                         node_txn.clear();
7444                 }
7445         }
7446         assert_ne!(feerate_2, 0);
7447
7448         // After exhaustion of height timer for a 2nd time, a new bumped justice tx should have been broadcast, check it
7449         connect_blocks(&nodes[1], 1);
7450         let penalty_3;
7451         let mut feerate_3 = 0;
7452         {
7453                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7454                 assert_eq!(node_txn.len(), 1);
7455                 if node_txn[0].input[0].previous_output.txid == revoked_txid {
7456                         assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7457                         assert_eq!(node_txn[0].output.len(), 1);
7458                         check_spends!(node_txn[0], revoked_txn[0]);
7459                         penalty_3 = node_txn[0].txid();
7460			// Verify the new bumped tx is different from the last claiming transaction, we don't want spurious rebroadcast
7461                         assert_ne!(penalty_3, penalty_2);
7462                         let fee_3 = penalty_sum - node_txn[0].output[0].value;
7463                         feerate_3 = fee_3 * 1000 / node_txn[0].weight().to_wu();
7464                         // Verify 25% bump heuristic
7465                         assert!(feerate_3 * 100 >= feerate_2 * 125);
7466                         node_txn.clear();
7467                 }
7468         }
7469         assert_ne!(feerate_3, 0);
7470
7471         nodes[1].node.get_and_clear_pending_events();
7472         nodes[1].node.get_and_clear_pending_msg_events();
7473 }
7474
7475 #[test]
7476 fn test_bump_penalty_txn_on_revoked_htlcs() {
7477	// If penalty txn have feerates too low to get into mempools, RBF-bump them to be sure
7478	// we're able to claim the outputs on revoked HTLC transactions before their timelocks expire
7479
7480         let mut chanmon_cfgs = create_chanmon_cfgs(2);
7481         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
7482         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7483         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7484         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7485
7486         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7487         // Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps)
7488         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 50).with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
7489         let scorer = test_utils::TestScorer::new();
7490         let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7491         let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
7492         let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(), None,
7493                 nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
7494         let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0;
7495         let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 50)
7496                 .with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap();
7497         let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
7498         let route = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None,
7499                 nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
7500         send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000);
7501
7502         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
7503         assert_eq!(revoked_local_txn[0].input.len(), 1);
7504         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
7505
7506         // Revoke local commitment tx
7507         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
7508
7509         // B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx
7510         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()]));
7511         check_closed_broadcast!(nodes[1], true);
7512         check_added_monitors!(nodes[1], 1);
7513         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
7514         connect_blocks(&nodes[1], 50); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
7515
7516         let revoked_htlc_txn = {
7517                 let txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
7518                 assert_eq!(txn.len(), 2);
7519
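		// The witness script length tells the two apart: an accepted-HTLC script spend is B's
		// HTLC-preimage (success) claim, an offered-HTLC script spend is its HTLC-timeout.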
7520                 assert_eq!(txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
7521                 assert_eq!(txn[0].input.len(), 1);
7522                 check_spends!(txn[0], revoked_local_txn[0]);
7523
7524                 assert_eq!(txn[1].input.len(), 1);
7525                 assert_eq!(txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
7526                 assert_eq!(txn[1].output.len(), 1);
7527                 check_spends!(txn[1], revoked_local_txn[0]);
7528
7529                 txn
7530         };
7531
7532         // Broadcast set of revoked txn on A
7533         let hash_128 = connect_blocks(&nodes[0], 40);
7534         let block_11 = create_dummy_block(hash_128, 42, vec![revoked_local_txn[0].clone()]);
7535         connect_block(&nodes[0], &block_11);
7536         let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]);
7537         connect_block(&nodes[0], &block_129);
7538         let events = nodes[0].node.get_and_clear_pending_events();
7539         expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
7540         match events.last().unwrap() {
7541                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
7542                 _ => panic!("Unexpected event"),
7543         }
7544         let first;
7545         let feerate_1;
7546         let penalty_txn;
7547         {
7548                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7549		assert_eq!(node_txn.len(), 4); // 3 penalty txn on revoked commitment tx + 1 penalty tx on revoked HTLC txn
7550		// Verify the claim txn are spending the revoked HTLC txn
7551
7552                 // node_txn 0-2 each spend a separate revoked output from revoked_local_txn[0]
7553                 // Note that node_txn[0] and node_txn[1] are bogus - they double spend the revoked_htlc_txn
7554                 // which are included in the same block (they are broadcasted because we scan the
7555                 // transactions linearly and generate claims as we go, they likely should be removed in the
7556                 // future).
7557                 assert_eq!(node_txn[0].input.len(), 1);
7558                 check_spends!(node_txn[0], revoked_local_txn[0]);
7559                 assert_eq!(node_txn[1].input.len(), 1);
7560                 check_spends!(node_txn[1], revoked_local_txn[0]);
7561                 assert_eq!(node_txn[2].input.len(), 1);
7562                 check_spends!(node_txn[2], revoked_local_txn[0]);
7563
7564                 // Each of the three justice transactions claim a separate (single) output of the three
7565                 // available, which we check here:
7566                 assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
7567                 assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
7568                 assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
7569
7570                 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output);
7571                 assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
7572
7573                 // node_txn[3] spends the revoked outputs from the revoked_htlc_txn (which only have one
7574                 // output, checked above).
7575                 assert_eq!(node_txn[3].input.len(), 2);
7576                 assert_eq!(node_txn[3].output.len(), 1);
7577                 check_spends!(node_txn[3], revoked_htlc_txn[0], revoked_htlc_txn[1]);
7578
7579                 first = node_txn[3].txid();
7580                 // Store both feerates for later comparison
7581                 let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[3].output[0].value;
7582                 feerate_1 = fee_1 * 1000 / node_txn[3].weight().to_wu();
7583                 penalty_txn = vec![node_txn[2].clone()];
7584                 node_txn.clear();
7585         }
7586
7587	// Connect one more block to see if bumped penalty txn are issued for the HTLC txn
7588         let block_130 = create_dummy_block(block_129.block_hash(), 42, penalty_txn);
7589         connect_block(&nodes[0], &block_130);
7590         let block_131 = create_dummy_block(block_130.block_hash(), 42, Vec::new());
7591         connect_block(&nodes[0], &block_131);
7592
7593	// A few more blocks to confirm the penalty txn
7594         connect_blocks(&nodes[0], 4);
7595         assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
7596         let header_144 = connect_blocks(&nodes[0], 9);
7597         let node_txn = {
7598                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7599                 assert_eq!(node_txn.len(), 1);
7600
7601                 assert_eq!(node_txn[0].input.len(), 2);
7602                 check_spends!(node_txn[0], revoked_htlc_txn[0], revoked_htlc_txn[1]);
7603                 // Verify bumped tx is different and 25% bump heuristic
7604                 assert_ne!(first, node_txn[0].txid());
7605                 let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[0].output[0].value;
7606                 let feerate_2 = fee_2 * 1000 / node_txn[0].weight().to_wu();
7607                 assert!(feerate_2 * 100 > feerate_1 * 125);
7608                 let txn = vec![node_txn[0].clone()];
7609                 node_txn.clear();
7610                 txn
7611         };
7612	// Broadcast claim txn and confirm blocks to avoid further bumps on these outputs
7613         connect_block(&nodes[0], &create_dummy_block(header_144, 42, node_txn));
7614         connect_blocks(&nodes[0], 20);
7615         {
7616                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7617		// We verify that no new transaction has been broadcast. Previously we had a bug here: we
7618		// didn't track remote HTLC outputs for monitoring (see #411), so we wouldn't see them spent
7619		// by a justice tx, and bumped justice txn were generated forever instead of being safely
7620		// cleaned up after confirmation plus ANTI_REORG_DELAY blocks. Here, the claiming transaction's
7621		// spend of the revoked HTLC outputs removes the claim request as expected and dries up
7622		// bumped justice generation.
7623                 assert_eq!(node_txn.len(), 0);
7624                 node_txn.clear();
7625         }
7626         check_closed_broadcast!(nodes[0], true);
7627         check_added_monitors!(nodes[0], 1);
7628 }
7629
7630 #[test]
7631 fn test_bump_penalty_txn_on_remote_commitment() {
7632	// If claim txn have feerates too low to get into mempools, RBF-bump them to be sure
7633	// we're able to claim the outputs on the remote commitment transaction before its timelocks expire
7634
7635         // Create 2 HTLCs
7636         // Provide preimage for one
7637         // Check aggregation
7638
7639         let chanmon_cfgs = create_chanmon_cfgs(2);
7640         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7641         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7642         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7643
7644         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7645         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
7646         route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
7647
7648         // Remote commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
7649         let remote_txn = get_local_commitment_txn!(nodes[0], chan.2);
7650         assert_eq!(remote_txn[0].output.len(), 4);
7651         assert_eq!(remote_txn[0].input.len(), 1);
7652         assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());
7653
7654         // Claim a HTLC without revocation (provide B monitor with preimage)
7655         nodes[1].node.claim_funds(payment_preimage);
7656         expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
7657         mine_transaction(&nodes[1], &remote_txn[0]);
7658         check_added_monitors!(nodes[1], 2);
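	// Two monitor updates: one for the preimage provided via claim_funds and one for the
	// confirmed commitment tx.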
7659         connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
7660
7661         // One or more claim tx should have been broadcast, check it
7662         let timeout;
7663         let preimage;
7664         let preimage_bump;
7665         let feerate_timeout;
7666         let feerate_preimage;
7667         {
7668                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7669                 // 3 transactions including:
7670                 //   preimage and timeout sweeps from remote commitment + preimage sweep bump
7671                 assert_eq!(node_txn.len(), 3);
7672                 assert_eq!(node_txn[0].input.len(), 1);
7673                 assert_eq!(node_txn[1].input.len(), 1);
7674                 assert_eq!(node_txn[2].input.len(), 1);
7675                 check_spends!(node_txn[0], remote_txn[0]);
7676                 check_spends!(node_txn[1], remote_txn[0]);
7677                 check_spends!(node_txn[2], remote_txn[0]);
7678
7679                 preimage = node_txn[0].txid();
7680                 let index = node_txn[0].input[0].previous_output.vout;
7681                 let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
7682                 feerate_preimage = fee * 1000 / node_txn[0].weight().to_wu();
7683
7684                 let (preimage_bump_tx, timeout_tx) = if node_txn[2].input[0].previous_output == node_txn[0].input[0].previous_output {
7685                         (node_txn[2].clone(), node_txn[1].clone())
7686                 } else {
7687                         (node_txn[1].clone(), node_txn[2].clone())
7688                 };
7689
7690                 preimage_bump = preimage_bump_tx;
7691                 check_spends!(preimage_bump, remote_txn[0]);
7692                 assert_eq!(node_txn[0].input[0].previous_output, preimage_bump.input[0].previous_output);
7693
7694                 timeout = timeout_tx.txid();
7695                 let index = timeout_tx.input[0].previous_output.vout;
7696                 let fee = remote_txn[0].output[index as usize].value - timeout_tx.output[0].value;
7697                 feerate_timeout = fee * 1000 / timeout_tx.weight().to_wu();
7698
7699                 node_txn.clear();
7700         };
7701         assert_ne!(feerate_timeout, 0);
7702         assert_ne!(feerate_preimage, 0);
7703
7704         // After exhaustion of height timer, new bumped claim txn should have been broadcast, check it
7705         connect_blocks(&nodes[1], 1);
7706         {
7707                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7708                 assert_eq!(node_txn.len(), 1);
7709                 assert_eq!(node_txn[0].input.len(), 1);
7710                 assert_eq!(preimage_bump.input.len(), 1);
7711                 check_spends!(node_txn[0], remote_txn[0]);
7712                 check_spends!(preimage_bump, remote_txn[0]);
7713
7714                 let index = preimage_bump.input[0].previous_output.vout;
7715                 let fee = remote_txn[0].output[index as usize].value - preimage_bump.output[0].value;
7716                 let new_feerate = fee * 1000 / preimage_bump.weight().to_wu();
7717                 assert!(new_feerate * 100 > feerate_timeout * 125);
7718                 assert_ne!(timeout, preimage_bump.txid());
7719
7720                 let index = node_txn[0].input[0].previous_output.vout;
7721                 let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
7722                 let new_feerate = fee * 1000 / node_txn[0].weight().to_wu();
7723                 assert!(new_feerate * 100 > feerate_preimage * 125);
7724                 assert_ne!(preimage, node_txn[0].txid());
7725
7726                 node_txn.clear();
7727         }
7728
7729         nodes[1].node.get_and_clear_pending_events();
7730         nodes[1].node.get_and_clear_pending_msg_events();
7731 }
7732
7733 #[test]
7734 fn test_counterparty_raa_skip_no_crash() {
7735         // Previously, if our counterparty sent two RAAs in a row without us having provided a
7736         // commitment transaction, we would have happily carried on and provided them the next
7737         // commitment transaction based on one RAA forward. This would probably eventually have led to
7738         // channel closure, but it would not have resulted in funds loss. Still, our
7739         // TestChannelSigner would have panicked as it doesn't like jumps into the future. Here, we
7740         // check simply that the channel is closed in response to such an RAA, but don't check whether
7741         // we decide to punish our counterparty for revoking their funds (as we don't currently
7742         // implement that).
7743         let chanmon_cfgs = create_chanmon_cfgs(2);
7744         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7745         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7746         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7747         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
7748
7749         let per_commitment_secret;
7750         let next_per_commitment_point;
7751         {
7752                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
7753                 let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
7754                 let keys = guard.channel_by_id.get_mut(&channel_id).map(
7755                         |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
7756                 ).flatten().unwrap().get_signer();
7757
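		// Per BOLT 3, commitment numbers start at 2^48 - 1 and count down with each new
		// commitment; the enforcing test signer only releases secrets in order, so we walk its
		// state back one step per release below.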
7758                 const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
7759
7760                 // Make signer believe we got a counterparty signature, so that it allows the revocation
7761                 keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
7762                 per_commitment_secret = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
7763
7764                 // Must revoke without gaps
7765                 keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
7766                 keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
7767
7768                 keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
7769                 next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
7770                         &SecretKey::from_slice(&keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
7771         }
7772
7773         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
7774                 &msgs::RevokeAndACK {
7775                         channel_id,
7776                         per_commitment_secret,
7777                         next_per_commitment_point,
7778                         #[cfg(taproot)]
7779                         next_local_nonce: None,
7780                 });
7781         assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
7782         check_added_monitors!(nodes[1], 1);
7783         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() }
7784                 , [nodes[0].node.get_our_node_id()], 100000);
7785 }
7786
7787 #[test]
7788 fn test_bump_txn_sanitize_tracking_maps() {
7789	// Sanitizing pending_claim_requests and claimable_outpoints used to be buggy;
7790	// verify we clean them right after ANTI_REORG_DELAY expires.
7791
7792         let chanmon_cfgs = create_chanmon_cfgs(2);
7793         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7794         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7795         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7796
7797         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7798         // Lock HTLC in both directions
7799         let (payment_preimage_1, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
7800         let (_, payment_hash_2, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
7801
7802         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
7803         assert_eq!(revoked_local_txn[0].input.len(), 1);
7804         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
7805
7806         // Revoke local commitment tx
7807         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
7808
7809         // Broadcast set of revoked txn on A
7810         connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH);
7811         expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
7812         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
7813
7814         mine_transaction(&nodes[0], &revoked_local_txn[0]);
7815         check_closed_broadcast!(nodes[0], true);
7816         check_added_monitors!(nodes[0], 1);
7817         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000);
7818         let penalty_txn = {
7819                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7820                 assert_eq!(node_txn.len(), 3); //ChannelMonitor: justice txn * 3
7821                 check_spends!(node_txn[0], revoked_local_txn[0]);
7822                 check_spends!(node_txn[1], revoked_local_txn[0]);
7823                 check_spends!(node_txn[2], revoked_local_txn[0]);
7824                 let penalty_txn = vec![node_txn[0].clone(), node_txn[1].clone(), node_txn[2].clone()];
7825                 node_txn.clear();
7826                 penalty_txn
7827         };
7828         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, penalty_txn));
7829         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7830         {
7831                 let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(OutPoint { txid: chan.3.txid(), index: 0 }).unwrap();
7832                 assert!(monitor.inner.lock().unwrap().onchain_tx_handler.pending_claim_requests.is_empty());
7833                 assert!(monitor.inner.lock().unwrap().onchain_tx_handler.claimable_outpoints.is_empty());
7834         }
7835 }
7836
7837 #[test]
7838 fn test_channel_conf_timeout() {
7839         // Tests that, for inbound channels, we give up on them if the funding transaction does not
7840         // confirm within 2016 blocks, as recommended by BOLT 2.
7841         let chanmon_cfgs = create_chanmon_cfgs(2);
7842         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7843         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7844         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7845
7846         let _funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 100_000);
7847
7848         // The outbound node should wait forever for confirmation:
7849         // This matches `channel::FUNDING_CONF_DEADLINE_BLOCKS` and BOLT 2's suggested timeout, thus is
7850         // copied here instead of directly referencing the constant.
7851         connect_blocks(&nodes[0], 2016);
7852         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7853
7854         // The inbound node should fail the channel after exactly 2016 blocks
7855         connect_blocks(&nodes[1], 2015);
7856         check_added_monitors!(nodes[1], 0);
7857         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7858
7859         connect_blocks(&nodes[1], 1);
7860         check_added_monitors!(nodes[1], 1);
7861         check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
7862         let close_ev = nodes[1].node.get_and_clear_pending_msg_events();
7863         assert_eq!(close_ev.len(), 1);
7864         match close_ev[0] {
7865                 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { ref msg }, ref node_id } => {
7866                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
7867                         assert_eq!(msg.as_ref().unwrap().data, "Channel closed because funding transaction failed to confirm within 2016 blocks");
7868                 },
7869                 _ => panic!("Unexpected event"),
7870         }
7871 }
7872
7873 #[test]
7874 fn test_override_channel_config() {
7875         let chanmon_cfgs = create_chanmon_cfgs(2);
7876         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7877         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7878         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7879
7880         // Node0 initiates a channel to node1 using the override config.
7881         let mut override_config = UserConfig::default();
7882         override_config.channel_handshake_config.our_to_self_delay = 200;
7883
7884         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(override_config)).unwrap();
7885
7886         // Assert the channel created by node0 is using the override config.
7887         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7888         assert_eq!(res.channel_flags, 0);
7889         assert_eq!(res.to_self_delay, 200);
7890 }
7891
7892 #[test]
7893 fn test_override_0msat_htlc_minimum() {
7894         let mut zero_config = UserConfig::default();
7895         zero_config.channel_handshake_config.our_htlc_minimum_msat = 0;
7896         let chanmon_cfgs = create_chanmon_cfgs(2);
7897         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7898         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(zero_config.clone())]);
7899         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7900
7901         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(zero_config)).unwrap();
7902         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
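	// A configured htlc_minimum_msat of 0 is floored to 1 msat in the outgoing open_channel.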
7903         assert_eq!(res.htlc_minimum_msat, 1);
7904
7905         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
7906         let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
7907         assert_eq!(res.htlc_minimum_msat, 1);
7908 }
7909
7910 #[test]
7911 fn test_channel_update_has_correct_htlc_maximum_msat() {
7912         // Tests that the `ChannelUpdate` message has the correct values for `htlc_maximum_msat` set.
7913	// BOLT 7 specifies that, if present, `htlc_maximum_msat`:
7914         // 1. MUST be set to less than or equal to the channel capacity. In LDK, this is capped to
7915         // 90% of the `channel_value`.
7916         // 2. MUST be set to less than or equal to the `max_htlc_value_in_flight_msat` received from the peer.
7917
7918         let mut config_30_percent = UserConfig::default();
7919         config_30_percent.channel_handshake_config.announced_channel = true;
7920         config_30_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 30;
7921         let mut config_50_percent = UserConfig::default();
7922         config_50_percent.channel_handshake_config.announced_channel = true;
7923         config_50_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50;
7924         let mut config_95_percent = UserConfig::default();
7925         config_95_percent.channel_handshake_config.announced_channel = true;
7926         config_95_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 95;
7927         let mut config_100_percent = UserConfig::default();
7928         config_100_percent.channel_handshake_config.announced_channel = true;
7929         config_100_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
7930
7931         let chanmon_cfgs = create_chanmon_cfgs(4);
7932         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
7933         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[Some(config_30_percent), Some(config_50_percent), Some(config_95_percent), Some(config_100_percent)]);
7934         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
7935
7936         let channel_value_satoshis = 100000;
7937         let channel_value_msat = channel_value_satoshis * 1000;
7938         let channel_value_30_percent_msat = (channel_value_msat as f64 * 0.3) as u64;
7939         let channel_value_50_percent_msat = (channel_value_msat as f64 * 0.5) as u64;
7940         let channel_value_90_percent_msat = (channel_value_msat as f64 * 0.9) as u64;
7941
7942         let (node_0_chan_update, node_1_chan_update, _, _)  = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value_satoshis, 10001);
7943         let (node_2_chan_update, node_3_chan_update, _, _)  = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, channel_value_satoshis, 10001);
7944
7945         // Assert that `node[0]`'s `ChannelUpdate` is capped at 50 percent of the `channel_value`, as
7946         // that's the value of `node[1]`'s `holder_max_htlc_value_in_flight_msat`.
7947         assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, channel_value_50_percent_msat);
7948         // Assert that `node[1]`'s `ChannelUpdate` is capped at 30 percent of the `channel_value`, as
7949         // that's the value of `node[0]`'s `holder_max_htlc_value_in_flight_msat`.
7950         assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, channel_value_30_percent_msat);
7951
7952         // Assert that `node[2]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
7953         // the value of `node[3]`'s `holder_max_htlc_value_in_flight_msat` (100%), exceeds 90% of the
7954         // `channel_value`.
7955         assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
7956         // Assert that `node[3]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
7957         // the value of `node[2]`'s `holder_max_htlc_value_in_flight_msat` (95%), exceeds 90% of the
7958         // `channel_value`.
7959         assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
7960 }
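
// A minimal sketch of the cap behaviour asserted above (our reading of the assertions, not
// necessarily LDK's internal code path): the advertised `htlc_maximum_msat` matches the
// counterparty's configured in-flight percentage of the channel value, clamped to 90% of that
// value. E.g. for a 100_000_000 msat channel and a 95% peer: min(95_000_000, 90_000_000) =
// 90_000_000, which is the value asserted for `node_3_chan_update` above.
#[allow(dead_code)]
fn sketch_expected_htlc_maximum_msat(channel_value_msat: u64, counterparty_percent: u64) -> u64 {
        core::cmp::min(channel_value_msat * counterparty_percent / 100, channel_value_msat * 9 / 10)
}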
7961
7962 #[test]
7963 fn test_manually_accept_inbound_channel_request() {
7964         let mut manually_accept_conf = UserConfig::default();
7965         manually_accept_conf.manually_accept_inbound_channels = true;
7966         let chanmon_cfgs = create_chanmon_cfgs(2);
7967         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7968         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
7969         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7970
7971         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
7972         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7973
7974         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
7975
7976         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
7977         // accepting the inbound channel request.
7978         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
7979
7980         let events = nodes[1].node.get_and_clear_pending_events();
7981         match events[0] {
7982                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
7983                         nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23).unwrap();
7984                 }
7985                 _ => panic!("Unexpected event"),
7986         }
7987
7988         let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
7989         assert_eq!(accept_msg_ev.len(), 1);
7990
7991         match accept_msg_ev[0] {
7992                 MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
7993                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
7994                 }
7995                 _ => panic!("Unexpected event"),
7996         }
7997
7998         nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
7999
8000         let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8001         assert_eq!(close_msg_ev.len(), 1);
8002
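        // The `user_channel_id` we passed to `accept_inbound_channel` above (23) should be
        // echoed back in the resulting `ChannelClosed` event.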
8003         let events = nodes[1].node.get_and_clear_pending_events();
8004         match events[0] {
8005                 Event::ChannelClosed { user_channel_id, .. } => {
8006                         assert_eq!(user_channel_id, 23);
8007                 }
8008                 _ => panic!("Unexpected event"),
8009         }
8010 }
8011
8012 #[test]
8013 fn test_manually_reject_inbound_channel_request() {
8014         let mut manually_accept_conf = UserConfig::default();
8015         manually_accept_conf.manually_accept_inbound_channels = true;
8016         let chanmon_cfgs = create_chanmon_cfgs(2);
8017         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8018         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
8019         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8020
8021         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
8022         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8023
8024         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
8025
8026         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
8027         // rejecting the inbound channel request.
8028         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
8029
8030         let events = nodes[1].node.get_and_clear_pending_events();
8031         match events[0] {
8032                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
8033                         nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
8034                 }
8035                 _ => panic!("Unexpected event"),
8036         }
8037
8038         let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8039         assert_eq!(close_msg_ev.len(), 1);
8040
8041         match close_msg_ev[0] {
8042                 MessageSendEvent::HandleError { ref node_id, .. } => {
8043                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8044                 }
8045                 _ => panic!("Unexpected event"),
8046         }
8047
8048         // There should be no more events to process, as the channel was never opened.
8049         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
8050 }
8051
8052 #[test]
8053 fn test_can_not_accept_inbound_channel_twice() {
8054         let mut manually_accept_conf = UserConfig::default();
8055         manually_accept_conf.manually_accept_inbound_channels = true;
8056         let chanmon_cfgs = create_chanmon_cfgs(2);
8057         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8058         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
8059         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8060
8061         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
8062         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8063
8064         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
8065
8066         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
8067         // accepting the inbound channel request.
8068         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
8069
8070         let events = nodes[1].node.get_and_clear_pending_events();
8071         match events[0] {
8072                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
8073                         nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
8074                         let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0);
8075                         match api_res {
8076                                 Err(APIError::APIMisuseError { err }) => {
8077                                         assert_eq!(err, "No such channel awaiting to be accepted.");
8078                                 },
8079                                 Ok(_) => panic!("Channel shouldn't be possible to be accepted twice"),
8080                                 Err(e) => panic!("Unexpected Error {:?}", e),
8081                         }
8082                 }
8083                 _ => panic!("Unexpected event"),
8084         }
8085
8086         // Ensure that the channel wasn't closed after attempting to accept it twice.
8087         let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8088         assert_eq!(accept_msg_ev.len(), 1);
8089
8090         match accept_msg_ev[0] {
8091                 MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
8092                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8093                 }
8094                 _ => panic!("Unexpected event"),
8095         }
8096 }
8097
8098 #[test]
8099 fn test_can_not_accept_unknown_inbound_channel() {
8100         let chanmon_cfg = create_chanmon_cfgs(2);
8101         let node_cfg = create_node_cfgs(2, &chanmon_cfg);
8102         let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
8103         let nodes = create_network(2, &node_cfg, &node_chanmgr);
8104
8105         let unknown_channel_id = ChannelId::new_zero();
8106         let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0);
8107         match api_res {
8108                 Err(APIError::APIMisuseError { err }) => {
8109                         assert_eq!(err, "No such channel awaiting to be accepted.");
8110                 },
8111                 Ok(_) => panic!("It shouldn't be possible to accept an unknown channel"),
8112                 Err(e) => panic!("Unexpected Error: {:?}", e),
8113         }
8114 }
8115
8116 #[test]
8117 fn test_onion_value_mpp_set_calculation() {
8118         // Test that we use the onion value `amt_to_forward` when
8119         // calculating whether we've reached the `total_msat` of an MPP
8120         // by having a routing node forward more than `amt_to_forward`
8121         // and checking that the receiving node doesn't generate
8122         // a PaymentClaimable event too early
8123         let node_count = 4;
8124         let chanmon_cfgs = create_chanmon_cfgs(node_count);
8125         let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
8126         let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
8127         let mut nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
8128
8129         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8130         let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
8131         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
8132         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
8133
8134         let total_msat = 100_000;
8135         let expected_paths: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]];
8136         let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], total_msat);
8137         let sample_path = route.paths.pop().unwrap();
8138
8139         let mut path_1 = sample_path.clone();
8140         path_1.hops[0].pubkey = nodes[1].node.get_our_node_id();
8141         path_1.hops[0].short_channel_id = chan_1_id;
8142         path_1.hops[1].pubkey = nodes[3].node.get_our_node_id();
8143         path_1.hops[1].short_channel_id = chan_3_id;
8144         path_1.hops[1].fee_msat = 100_000;
8145         route.paths.push(path_1);
8146
8147         let mut path_2 = sample_path.clone();
8148         path_2.hops[0].pubkey = nodes[2].node.get_our_node_id();
8149         path_2.hops[0].short_channel_id = chan_2_id;
8150         path_2.hops[1].pubkey = nodes[3].node.get_our_node_id();
8151         path_2.hops[1].short_channel_id = chan_4_id;
8152         path_2.hops[1].fee_msat = 1_000;
8153         route.paths.push(path_2);
8154
8155         // Send payment
8156         let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
8157         let onion_session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
8158                 RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap();
8159         nodes[0].node.test_send_payment_internal(&route, our_payment_hash,
8160                 RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
8161         check_added_monitors!(nodes[0], expected_paths.len());
8162
8163         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8164         assert_eq!(events.len(), expected_paths.len());
8165
8166         // First path
8167         let ev = remove_first_msg_event_to_node(&expected_paths[0][0].node.get_our_node_id(), &mut events);
8168         let mut payment_event = SendEvent::from_event(ev);
8169         let mut prev_node = &nodes[0];
8170
8171         for (idx, &node) in expected_paths[0].iter().enumerate() {
8172                 assert_eq!(node.node.get_our_node_id(), payment_event.node_id);
8173
8174                 if idx == 0 { // routing node
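                        // Rebuild the onion with a session key of our choosing so we can hand the
                        // routing node a packet whose final-hop amount differs from the HTLC value
                        // it actually forwards.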
8175                         let session_priv = [3; 32];
8176                         let height = nodes[0].best_block_info().1;
8177                         let session_priv = SecretKey::from_slice(&session_priv).unwrap();
8178                         let mut onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
8179                         let (mut onion_payloads, _, _) = onion_utils::build_onion_payloads(&route.paths[0], 100_000,
8180                                 RecipientOnionFields::secret_only(our_payment_secret), height + 1, &None).unwrap();
8181                         // Edit amt_to_forward to simulate the sender having set the final
8182                         // amount while the routing node takes a lower fee than expected.
8183                         if let msgs::OutboundOnionPayload::Receive { ref mut amt_msat, .. } = onion_payloads[1] {
8184                                 *amt_msat = 99_000;
8185                         } else { panic!() }
8186                         let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
8187                         payment_event.msgs[0].onion_routing_packet = new_onion_packet;
8188                 }
8189
8190                 node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]);
8191                 check_added_monitors!(node, 0);
8192                 commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
8193                 expect_pending_htlcs_forwardable!(node);
8194
8195                 if idx == 0 {
8196                         let mut events_2 = node.node.get_and_clear_pending_msg_events();
8197                         assert_eq!(events_2.len(), 1);
8198                         check_added_monitors!(node, 1);
8199                         payment_event = SendEvent::from_event(events_2.remove(0));
8200                         assert_eq!(payment_event.msgs.len(), 1);
8201                 } else {
8202                         let events_2 = node.node.get_and_clear_pending_events();
8203                         assert!(events_2.is_empty());
8204                 }
8205
8206                 prev_node = node;
8207         }
8208
8209         // Second path
8210         let ev = remove_first_msg_event_to_node(&expected_paths[1][0].node.get_our_node_id(), &mut events);
8211         pass_along_path(&nodes[0], expected_paths[1], 101_000, our_payment_hash.clone(), Some(our_payment_secret), ev, true, None);
8212
8213         claim_payment_along_route(&nodes[0], expected_paths, false, our_payment_preimage);
8214 }
8215
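// Tests receipt of an MPP payment whose per-path amounts sum to more than `total_msat` (an
// "overshoot"): the payment should become claimable once at least `total_msat` has arrived.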
8216 fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) {
8217
8218         let routing_node_count = msat_amounts.len();
8219         let node_count = routing_node_count + 2;
8220
8221         let chanmon_cfgs = create_chanmon_cfgs(node_count);
8222         let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
8223         let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
8224         let nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
8225
8226         let src_idx = 0;
8227         let dst_idx = 1;
8228
8229         // Create channels for each amount
8230         let mut expected_paths = Vec::with_capacity(routing_node_count);
8231         let mut src_chan_ids = Vec::with_capacity(routing_node_count);
8232         let mut dst_chan_ids = Vec::with_capacity(routing_node_count);
8233         for i in 0..routing_node_count {
8234                 let routing_node = 2 + i;
8235                 let src_chan_id = create_announced_chan_between_nodes(&nodes, src_idx, routing_node).0.contents.short_channel_id;
8236                 src_chan_ids.push(src_chan_id);
8237                 let dst_chan_id = create_announced_chan_between_nodes(&nodes, routing_node, dst_idx).0.contents.short_channel_id;
8238                 dst_chan_ids.push(dst_chan_id);
8239                 let path = vec![&nodes[routing_node], &nodes[dst_idx]];
8240                 expected_paths.push(path);
8241         }
8242         let expected_paths: Vec<&[&Node]> = expected_paths.iter().map(|route| route.as_slice()).collect();
8243
8244         // Create a route for each amount
8245         let example_amount = 100000;
8246         let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[src_idx], nodes[dst_idx], example_amount);
8247         let sample_path = route.paths.pop().unwrap();
8248         for i in 0..routing_node_count {
8249                 let routing_node = 2 + i;
8250                 let mut path = sample_path.clone();
8251                 path.hops[0].pubkey = nodes[routing_node].node.get_our_node_id();
8252                 path.hops[0].short_channel_id = src_chan_ids[i];
8253                 path.hops[1].pubkey = nodes[dst_idx].node.get_our_node_id();
8254                 path.hops[1].short_channel_id = dst_chan_ids[i];
8255                 path.hops[1].fee_msat = msat_amounts[i];
8256                 route.paths.push(path);
8257         }
8258
8259         // Send payment with manually set total_msat
8260         let payment_id = PaymentId(nodes[src_idx].keys_manager.backing.get_secure_random_bytes());
8261         let onion_session_privs = nodes[src_idx].node.test_add_new_pending_payment(our_payment_hash,
8262                 RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap();
8263         nodes[src_idx].node.test_send_payment_internal(&route, our_payment_hash,
8264                 RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
8265         check_added_monitors!(nodes[src_idx], expected_paths.len());
8266
8267         let mut events = nodes[src_idx].node.get_and_clear_pending_msg_events();
8268         assert_eq!(events.len(), expected_paths.len());
8269         let mut amount_received = 0;
8270         for (path_idx, expected_path) in expected_paths.iter().enumerate() {
8271                 let ev = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &mut events);
8272
8273                 let current_path_amount = msat_amounts[path_idx];
8274                 amount_received += current_path_amount;
8275                 let became_claimable_now = amount_received >= total_msat && amount_received - current_path_amount < total_msat;
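                // Only the path that first pushes the cumulative received amount to (or past)
                // `total_msat` should produce a `PaymentClaimable` event.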
8276                 pass_along_path(&nodes[src_idx], expected_path, amount_received, our_payment_hash.clone(), Some(our_payment_secret), ev, became_claimable_now, None);
8277         }
8278
8279         claim_payment_along_route(&nodes[src_idx], &expected_paths, false, our_payment_preimage);
8280 }
8281
8282 #[test]
8283 fn test_overshoot_mpp() {
8284         do_test_overshoot_mpp(&[100_000, 101_000], 200_000);
8285         do_test_overshoot_mpp(&[100_000, 10_000, 100_000], 200_000);
8286 }
8287
8288 #[test]
8289 fn test_simple_mpp() {
8290         // Simple test of sending a multi-path payment.
8291         let chanmon_cfgs = create_chanmon_cfgs(4);
8292         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
8293         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
8294         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
8295
8296         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8297         let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
8298         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
8299         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
8300
8301         let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
8302         let path = route.paths[0].clone();
8303         route.paths.push(path);
8304         route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
8305         route.paths[0].hops[0].short_channel_id = chan_1_id;
8306         route.paths[0].hops[1].short_channel_id = chan_3_id;
8307         route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
8308         route.paths[1].hops[0].short_channel_id = chan_2_id;
8309         route.paths[1].hops[1].short_channel_id = chan_4_id;
8310         send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
8311         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
8312 }
8313
8314 #[test]
8315 fn test_preimage_storage() {
8316         // Simple test that our payment preimage storage lets payments be claimed without any client-side per-payment storage
8317         let chanmon_cfgs = create_chanmon_cfgs(2);
8318         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8319         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8320         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8321
8322         create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8323
8324         {
8325                 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200, None).unwrap();
8326                 let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
8327                 nodes[0].node.send_payment_with_route(&route, payment_hash,
8328                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
8329                 check_added_monitors!(nodes[0], 1);
8330                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8331                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
8332                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8333                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8334         }
8335         // Note that after leaving the above scope we have no knowledge of any arguments or return
8336         // values from previous calls.
8337         expect_pending_htlcs_forwardable!(nodes[1]);
8338         let events = nodes[1].node.get_and_clear_pending_events();
8339         assert_eq!(events.len(), 1);
8340         match events[0] {
8341                 Event::PaymentClaimable { ref purpose, .. } => {
8342                         match &purpose {
8343                                 PaymentPurpose::InvoicePayment { payment_preimage, .. } => {
8344                                         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap());
8345                                 },
8346                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
8347                         }
8348                 },
8349                 _ => panic!("Unexpected event"),
8350         }
8351 }
8352
8353 #[test]
8354 fn test_bad_secret_hash() {
8355         // Simple test of unregistered payment hash/invalid payment secret handling
8356         let chanmon_cfgs = create_chanmon_cfgs(2);
8357         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8358         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8359         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8360
8361         create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8362
8363         let random_payment_hash = PaymentHash([42; 32]);
8364         let random_payment_secret = PaymentSecret([43; 32]);
8365         let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2, None).unwrap();
8366         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
8367
8368         // All the cases below should end up being handled exactly identically, so we wrap the
8369         // handling of the resulting events in a macro.
8370         macro_rules! handle_unknown_invalid_payment_data {
8371                 ($payment_hash: expr) => {
8372                         check_added_monitors!(nodes[0], 1);
8373                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8374                         let payment_event = SendEvent::from_event(events.pop().unwrap());
8375                         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8376                         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8377
8378                         // We have to forward pending HTLCs once to process the receipt of the HTLC and then
8379                         // again to process the pending backwards-failure of the HTLC
8380                         expect_pending_htlcs_forwardable!(nodes[1]);
8381                         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment{ payment_hash: $payment_hash }]);
8382                         check_added_monitors!(nodes[1], 1);
8383
8384                         // We should fail the payment back
8385                         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
8386                         match events.pop().unwrap() {
8387                                 MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } } => {
8388                                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
8389                                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
8390                                 },
8391                                 _ => panic!("Unexpected event"),
8392                         }
8393                 }
8394         }
8395
8396         let expected_error_code = 0x4000|15; // incorrect_or_unknown_payment_details
8397         // Error data is the HTLC value (100,000) and current block height
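        // (100,000 msat = 0x0001_86a0, encoded as a big-endian u64, followed by the height as a big-endian u32)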
8398         let expected_error_data = [0, 0, 0, 0, 0, 1, 0x86, 0xa0, 0, 0, 0, CHAN_CONFIRM_DEPTH as u8];
8399
8400         // Send a payment with the right payment hash but the wrong payment secret
8401         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
8402                 RecipientOnionFields::secret_only(random_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
8403         handle_unknown_invalid_payment_data!(our_payment_hash);
8404         expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data);
8405
8406         // Send a payment with a random payment hash, but the right payment secret
8407         nodes[0].node.send_payment_with_route(&route, random_payment_hash,
8408                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(random_payment_hash.0)).unwrap();
8409         handle_unknown_invalid_payment_data!(random_payment_hash);
8410         expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8411
8412         // Send a payment with a random payment hash and random payment secret
8413         nodes[0].node.send_payment_with_route(&route, random_payment_hash,
8414                 RecipientOnionFields::secret_only(random_payment_secret), PaymentId(random_payment_hash.0)).unwrap();
8415         handle_unknown_invalid_payment_data!(random_payment_hash);
8416         expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8417 }
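
// A minimal sketch of the failure-data layout checked above (illustrative only, not a helper
// the tests use): the HTLC amount as a big-endian u64 followed by the block height as a
// big-endian u32. `sketch_incorrect_payment_details_data(100_000, CHAN_CONFIRM_DEPTH)`
// reproduces `expected_error_data`.
#[allow(dead_code)]
fn sketch_incorrect_payment_details_data(htlc_msat: u64, height: u32) -> Vec<u8> {
        let mut data = htlc_msat.to_be_bytes().to_vec();
        data.extend_from_slice(&height.to_be_bytes());
        data
}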
8418
8419 #[test]
8420 fn test_update_err_monitor_lockdown() {
8421         // Our monitor will lock updates of the local commitment transaction once a broadcast
8422         // condition has been fulfilled (either a force-close from the Channel, or a block height
8423         // requiring an HTLC-timeout). Trying to update the monitor after lockdown should return
8424         // a ChannelMonitorUpdateStatus error.
8425         //
8426         // This scenario may happen in a watchtower setup, where the watchtower processes a block
8427         // height triggering a timeout while a slow-block-processing ChannelManager receives a
8428         // locally-signed commitment at the same time.
8429
8430         let chanmon_cfgs = create_chanmon_cfgs(2);
8431         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8432         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8433         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8434
8435         // Create some initial channel
8436         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
8437         let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
8438
8439         // Rebalance the network to generate HTLCs in both directions
8440         send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
8441
8442         // Route a HTLC from node 0 to node 1 (but don't settle)
8443         let (preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
8444
8445         // Copy the ChainMonitor to simulate a watchtower, then advance node 0's block height until its ChannelMonitor times out the HTLC on-chain
8446         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8447         let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
8448         let persister = test_utils::TestPersister::new();
8449         let watchtower = {
8450                 let new_monitor = {
8451                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8452                         let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
8453                                         &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8454                         assert!(new_monitor == *monitor);
8455                         new_monitor
8456                 };
8457                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8458                 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
8459                 watchtower
8460         };
8461         let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
8462         // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
8463         // transaction lock time requirements here.
8464         chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (block.clone(), 200));
8465         watchtower.chain_monitor.block_connected(&block, 200);
8466
8467         // Try to update ChannelMonitor
8468         nodes[1].node.claim_funds(preimage);
8469         check_added_monitors!(nodes[1], 1);
8470         expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
8471
8472         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8473         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
8474         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
8475         {
8476                 let mut node_0_per_peer_lock;
8477                 let mut node_0_peer_state_lock;
8478                 if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
8479                         if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
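                                // The watchtower's monitor has locked down after reaching the
                                // HTLC-timeout broadcast height, so it refuses the update, while
                                // node 0's in-sync monitor applies it.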
8480                                 assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);
8481                                 assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8482                         } else { assert!(false); }
8483                 } else {
8484                         assert!(false);
8485                 }
8486         }
8487         // Our local monitor is in-sync and hasn't yet processed the timeout
8488         check_added_monitors!(nodes[0], 1);
8489         let events = nodes[0].node.get_and_clear_pending_events();
8490         assert_eq!(events.len(), 1);
8491 }
8492
8493 #[test]
8494 fn test_concurrent_monitor_claim() {
8495         // Watchtower Alice receives a block and broadcasts state N. The channel then advances to
8496         // state N+1, which is sent to both watchtowers: Bob accepts N+1 while Alice rejects it.
8497         // Bob then receives a block and broadcasts the latest state, N+1. Since Bob has already
8498         // broadcast it, state N+1 confirms, and Alice claims her output from state N+1.
8499
8500         let chanmon_cfgs = create_chanmon_cfgs(2);
8501         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8502         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8503         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8504
8505         // Create some initial channel
8506         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
8507         let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
8508
8509         // Rebalance the network to generate HTLCs in both directions
8510         send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
8511
8512         // Route a HTLC from node 0 to node 1 (but don't settle)
8513         route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
8514
8515         // Copy the ChainMonitor to simulate watchtower Alice, then advance her block height until her ChannelMonitor times out the HTLC on-chain
8516         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8517         let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
8518         let persister = test_utils::TestPersister::new();
8519         let alice_broadcaster = test_utils::TestBroadcaster::with_blocks(
8520                 Arc::new(Mutex::new(nodes[0].blocks.lock().unwrap().clone())),
8521         );
8522         let watchtower_alice = {
8523                 let new_monitor = {
8524                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8525                         let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
8526                                         &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8527                         assert!(new_monitor == *monitor);
8528                         new_monitor
8529                 };
8530                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &alice_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8531                 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
8532                 watchtower
8533         };
8534         let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
8535         // Make Alice aware of enough blocks that it doesn't think we're violating transaction lock time
8536         // requirements here.
8537         const HTLC_TIMEOUT_BROADCAST: u32 = CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS;
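        // HTLC_TIMEOUT_BROADCAST is the height at which Alice's monitor considers the HTLC expired
        // and broadcasts its commitment/HTLC-timeout claim.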
8538         alice_broadcaster.blocks.lock().unwrap().resize((HTLC_TIMEOUT_BROADCAST) as usize, (block.clone(), HTLC_TIMEOUT_BROADCAST));
8539         watchtower_alice.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST);
8540
8541         // Watchtower Alice should have broadcast a commitment/HTLC-timeout
8542         let alice_state = {
8543                 let mut txn = alice_broadcaster.txn_broadcast();
8544                 assert_eq!(txn.len(), 2);
8545                 txn.remove(0)
8546         };
8547
8548         // Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first.
8549         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8550         let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
8551         let persister = test_utils::TestPersister::new();
8552         let bob_broadcaster = test_utils::TestBroadcaster::with_blocks(Arc::clone(&alice_broadcaster.blocks));
8553         let watchtower_bob = {
8554                 let new_monitor = {
8555                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8556                         let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
8557                                         &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8558                         assert!(new_monitor == *monitor);
8559                         new_monitor
8560                 };
8561                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &bob_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8562                 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
8563                 watchtower
8564         };
8565         watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST - 1);
8566
8567         // Route another payment to generate another update while the previous HTLC is still pending
8568         let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 3000000);
8569         nodes[1].node.send_payment_with_route(&route, payment_hash,
8570                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
8571         check_added_monitors!(nodes[1], 1);
8572
8573         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8574         assert_eq!(updates.update_add_htlcs.len(), 1);
8575         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]);
8576         {
8577                 let mut node_0_per_peer_lock;
8578                 let mut node_0_peer_state_lock;
8579                 if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
8580                         if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
8581                                 // Watchtower Alice should already have seen the block, and thus rejects the update
8582                                 assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);
8583                                 assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8584                                 assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8585                         } else { assert!(false); }
8586                 } else {
8587                         assert!(false);
8588                 }
8589         }
8590         // Our local monitor is in-sync and hasn't yet processed the timeout
8591         check_added_monitors!(nodes[0], 1);
8592
8593         // Provide one more block to watchtower Bob, and expect broadcasts of the commitment and HTLC-timeout
8594         watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST);
8595
8596         // Watchtower Bob should have broadcast a commitment/HTLC-timeout
8597         let bob_state_y;
8598         {
8599                 let mut txn = bob_broadcaster.txn_broadcast();
8600                 assert_eq!(txn.len(), 2);
8601                 bob_state_y = txn.remove(0);
8602         };
8603
8604         // We confirm Bob's state Y on Alice; she should broadcast an HTLC-timeout
8605         let height = HTLC_TIMEOUT_BROADCAST + 1;
8606         connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
8607         check_closed_broadcast(&nodes[0], 1, true);
8608         check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed, false,
8609                 [nodes[1].node.get_our_node_id()], 100000);
8610         watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
8611         check_added_monitors(&nodes[0], 1);
8612         {
8613                 let htlc_txn = alice_broadcaster.txn_broadcast();
8614                 assert_eq!(htlc_txn.len(), 2);
8615                 check_spends!(htlc_txn[0], bob_state_y);
8616                 // Alice doesn't clean up the old HTLC claim since she hasn't seen a conflicting spend
8617                 // for it. However, she should, because it now has an invalid parent.
8618                 check_spends!(htlc_txn[1], alice_state);
8619         }
8620 }
8621
8622 #[test]
8623 fn test_pre_lockin_no_chan_closed_update() {
8624         // Test that if a peer closes a channel in response to a funding_created message we don't
8625         // generate a channel update (as the channel cannot appear on chain without a funding_signed
8626         // message).
8627         //
8628         // Doing so would imply a channel monitor update before the initial channel monitor
8629         // registration, violating our API guarantees.
8630         //
8631         // Previously, full_stack_target managed to hit this case by opening then closing a channel,
8632         // then opening a second channel with the same funding output as the first (which is not
8633         // rejected because the first channel does not exist in the ChannelManager) and closing it
8634         // before receiving funding_signed.
8635         let chanmon_cfgs = create_chanmon_cfgs(2);
8636         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8637         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8638         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8639
8640         // Create an initial channel
8641         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
8642         let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8643         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
8644         let accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
8645         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
8646
8647         // Move the first channel through the funding flow...
8648         let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
8649
8650         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
8651         check_added_monitors!(nodes[0], 0);
8652
8653         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
8654         let channel_id = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
8655         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
8656         assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
8657         check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
8658                 [nodes[1].node.get_our_node_id()], 100000);
8659 }
8660
8661 #[test]
8662 fn test_htlc_no_detection() {
8663         // This test is a mutation to underscore the detection logic bug we had
8664         // before #653. The HTLC value routed is above the remaining balance, which
8665         // swaps the order of the HTLC and `to_remote` outputs. With the HTLC coming
8666         // second, it wouldn't have been seen by the pre-#653 detection, as we were
8667         // enumerate()'ing over a watched-outputs vector (Vec<TxOut>), implicitly
8668         // relying on output order for correctly filtering spending children.
8669
8670         let chanmon_cfgs = create_chanmon_cfgs(2);
8671         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8672         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8673         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8674
8675         // Create some initial channels
8676         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8677
8678         send_payment(&nodes[0], &vec!(&nodes[1])[..], 1_000_000);
8679         let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
8680         let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
8681         assert_eq!(local_txn[0].input.len(), 1);
8682         assert_eq!(local_txn[0].output.len(), 3);
8683         check_spends!(local_txn[0], chan_1.3);
8684
8685         // Time out the HTLC on A's chain so that it can generate an HTLC-timeout tx
8686         let block = create_dummy_block(nodes[0].best_block_hash(), 42, vec![local_txn[0].clone()]);
8687         connect_block(&nodes[0], &block);
8688         // We deliberately connect the local tx twice, as this would have provoked a failure if
8689         // this test were run before the #653 fix.
8690         chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &block, nodes[0].best_block_info().1 + 1);
8691         check_closed_broadcast!(nodes[0], true);
8692         check_added_monitors!(nodes[0], 1);
8693         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
8694         connect_blocks(&nodes[0], TEST_FINAL_CLTV);
8695
8696         let htlc_timeout = {
8697                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8698                 assert_eq!(node_txn.len(), 1);
8699                 assert_eq!(node_txn[0].input.len(), 1);
8700                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
8701                 check_spends!(node_txn[0], local_txn[0]);
8702                 node_txn[0].clone()
8703         };
8704
8705         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![htlc_timeout.clone()]));
8706         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
8707         expect_payment_failed!(nodes[0], our_payment_hash, false);
8708 }
8709
8710 fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain_before_fulfill: bool) {
8711         // If we route an HTLC, then learn the HTLC's preimage after the upstream channel has been
8712         // force-closed, we must claim that HTLC on-chain. (Given an HTLC forwarded from Alice --> Bob -->
8713         // Carol, Alice would be the upstream node, and Carol the downstream.)
8714         //
8715         // Steps of the test:
8716         // 1) Alice sends a HTLC to Carol through Bob.
8717         // 2) Carol doesn't settle the HTLC.
8718         // 3) If broadcast_alice is true, Alice force-closes her channel with Bob. Else Bob force closes.
8719         // Steps 4 and 5 may be reordered depending on go_onchain_before_fulfill.
8720         // 4) Bob sees Alice's commitment on his chain (or vice versa). An offered output is
8721         //    present but can't be claimed, as Bob doesn't yet know the preimage.
8722         // 5) Carol releases the preimage to Bob off-chain.
8723         // 6) Bob claims the offered output on the broadcast commitment.
8724         let chanmon_cfgs = create_chanmon_cfgs(3);
8725         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
8726         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
8727         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
8728
8729         // Create some initial channels
8730         let chan_ab = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8731         create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 10001);
8732
8733         // Steps (1) and (2):
8734         // Send an HTLC Alice --> Bob --> Carol, but Carol doesn't settle the HTLC back.
8735         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
8736
8737         // Check that Alice's commitment transaction now contains an output for this HTLC.
8738         let alice_txn = get_local_commitment_txn!(nodes[0], chan_ab.2);
8739         check_spends!(alice_txn[0], chan_ab.3);
8740         assert_eq!(alice_txn[0].output.len(), 2);
8741         check_spends!(alice_txn[1], alice_txn[0]); // 2nd transaction is a non-final HTLC-timeout
8742         assert_eq!(alice_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
8743         assert_eq!(alice_txn.len(), 2);
8744
8745         // Steps (3) and (4):
8746         // If `go_onchain_before_fulfill`, broadcast the relevant commitment transaction and check that Bob
8747         // responds by (1) broadcasting a channel update and (2) adding a new ChannelMonitor.
8748         let mut force_closing_node = 0; // Alice force-closes
8749         let mut counterparty_node = 1; // Bob if Alice force-closes
8750
8751         // Bob force-closes
8752         if !broadcast_alice {
8753                 force_closing_node = 1;
8754                 counterparty_node = 0;
8755         }
8756         nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
8757         check_closed_broadcast!(nodes[force_closing_node], true);
8758         check_added_monitors!(nodes[force_closing_node], 1);
8759         check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed, [nodes[counterparty_node].node.get_our_node_id()], 100000);
8760         if go_onchain_before_fulfill {
8761                 let txn_to_broadcast = match broadcast_alice {
8762                         true => alice_txn.clone(),
8763                         false => get_local_commitment_txn!(nodes[1], chan_ab.2)
8764                 };
8765                 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
8766                 if broadcast_alice {
8767                         check_closed_broadcast!(nodes[1], true);
8768                         check_added_monitors!(nodes[1], 1);
8769                         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
8770                 }
8771         }
8772
8773         // Step (5):
8774         // Carol then claims the funds and sends an update_fulfill message to Bob, and they go through the
8775         // process of removing the HTLC from their commitment transactions.
8776         nodes[2].node.claim_funds(payment_preimage);
8777         check_added_monitors!(nodes[2], 1);
8778         expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
8779
8780         let carol_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
8781         assert!(carol_updates.update_add_htlcs.is_empty());
8782         assert!(carol_updates.update_fail_htlcs.is_empty());
8783         assert!(carol_updates.update_fail_malformed_htlcs.is_empty());
8784         assert!(carol_updates.update_fee.is_none());
8785         assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
8786
8787         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
8788         let went_onchain = go_onchain_before_fulfill || force_closing_node == 1;
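        // When the channel has gone (or will go) on-chain before Bob learns the preimage, the fee
        // he earned isn't knowable, so `expect_payment_forwarded` expects `None` below.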
8789         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if went_onchain { None } else { Some(1000) }, went_onchain, false);
8790         // If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
8791         if !go_onchain_before_fulfill && broadcast_alice {
8792                 let events = nodes[1].node.get_and_clear_pending_msg_events();
8793                 assert_eq!(events.len(), 1);
8794                 match events[0] {
8795                         MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
8796                                 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8797                         },
8798                         _ => panic!("Unexpected event"),
8799                 };
8800         }
8801         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &carol_updates.commitment_signed);
8802         // One monitor update for the preimage to update the Bob<->Alice channel, and one monitor
8803         // update for Carol<->Bob's updated commitment transaction info.
8804         check_added_monitors!(nodes[1], 2);

	let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 2);
	let bob_revocation = match events[0] {
		MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
			assert_eq!(*node_id, nodes[2].node.get_our_node_id());
			(*msg).clone()
		},
		_ => panic!("Unexpected event"),
	};
	let bob_updates = match events[1] {
		MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
			assert_eq!(*node_id, nodes[2].node.get_our_node_id());
			(*updates).clone()
		},
		_ => panic!("Unexpected event"),
	};

	nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revocation);
	check_added_monitors!(nodes[2], 1);
	nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_updates.commitment_signed);
	check_added_monitors!(nodes[2], 1);

	let events = nodes[2].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let carol_revocation = match events[0] {
		MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
			assert_eq!(*node_id, nodes[1].node.get_our_node_id());
			(*msg).clone()
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &carol_revocation);
	check_added_monitors!(nodes[1], 1);

	// If this test requires the force-closed channel to not be on-chain until after the fulfill,
	// here's where we put said channel's commitment tx on-chain.
	let mut txn_to_broadcast = alice_txn.clone();
	if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); }
	if !go_onchain_before_fulfill {
		connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
		// If Bob was the one to force-close, he will have already passed these checks earlier.
		if broadcast_alice {
			check_closed_broadcast!(nodes[1], true);
			check_added_monitors!(nodes[1], 1);
			check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
		}
		let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
		if broadcast_alice {
			assert_eq!(bob_txn.len(), 1);
			check_spends!(bob_txn[0], txn_to_broadcast[0]);
		} else {
			assert_eq!(bob_txn.len(), 2);
			check_spends!(bob_txn[0], chan_ab.3);
		}
	}

	// Step (6):
	// Finally, check that Bob broadcasted a preimage-claiming transaction for the HTLC output on the
	// broadcasted commitment transaction.
	{
		let script_weight = match broadcast_alice {
			true => OFFERED_HTLC_SCRIPT_WEIGHT,
			false => ACCEPTED_HTLC_SCRIPT_WEIGHT
		};
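		// (These constants are the BOLT 3 witness-script lengths for non-anchor channels; the
		// accepted-HTLC script is slightly longer than the offered-HTLC one as it carries an
		// extra CLTV check. A hedged sanity check along those lines:
		// debug_assert!(OFFERED_HTLC_SCRIPT_WEIGHT < ACCEPTED_HTLC_SCRIPT_WEIGHT);)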
		// If Alice force-closed, Bob only broadcasts an HTLC-output-claiming transaction. Otherwise,
		// Bob force-closed and broadcasts the commitment transaction along with an
		// HTLC-output-claiming transaction.
		let bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
		if broadcast_alice {
			assert_eq!(bob_txn.len(), 1);
			check_spends!(bob_txn[0], txn_to_broadcast[0]);
			assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
		} else {
			assert_eq!(bob_txn.len(), 2);
			check_spends!(bob_txn[1], txn_to_broadcast[0]);
			assert_eq!(bob_txn[1].input[0].witness.last().unwrap().len(), script_weight);
		}
	}
}

#[test]
fn test_onchain_htlc_settlement_after_close() {
	do_test_onchain_htlc_settlement_after_close(true, true);
	do_test_onchain_htlc_settlement_after_close(false, true); // Technically redundant, but may as well
	do_test_onchain_htlc_settlement_after_close(true, false);
	do_test_onchain_htlc_settlement_after_close(false, false);
}

#[test]
fn test_duplicate_temporary_channel_id_from_different_peers() {
	// Tests that we can accept two different `OpenChannel` requests with the same
	// `temporary_channel_id`, as long as they are from different peers.
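	// (Internally, channels are keyed per-peer, so identical ids under two different peers never
	// collide. Roughly, the lookup shape used elsewhere in this file is
	// `per_peer_state: Map<PublicKey, PeerState>` with each peer owning its own `channel_by_id`
	// map; the exact types here are a sketch, not the real signatures.)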
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	// Create the first channel
	nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
	let mut open_chan_msg_chan_1_0 = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());

	// Create a second channel
	nodes[2].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap();
	let mut open_chan_msg_chan_2_0 = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());

	// Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same
	// `temporary_channel_id` as the `OpenChannel` from nodes[1] to nodes[0].
	open_chan_msg_chan_2_0.temporary_channel_id = open_chan_msg_chan_1_0.temporary_channel_id;

	// Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same
	// `temporary_channel_id` as they are from different peers.
	nodes[0].node.handle_open_channel(&nodes[1].node.get_our_node_id(), &open_chan_msg_chan_1_0);
	{
		let events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		match &events[0] {
			MessageSendEvent::SendAcceptChannel { node_id, msg } => {
				assert_eq!(node_id, &nodes[1].node.get_our_node_id());
				assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id);
			},
			_ => panic!("Unexpected event"),
		}
	}

	nodes[0].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg_chan_2_0);
	{
		let events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		match &events[0] {
			MessageSendEvent::SendAcceptChannel { node_id, msg } => {
				assert_eq!(node_id, &nodes[2].node.get_our_node_id());
				assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id);
			},
			_ => panic!("Unexpected event"),
		}
	}
}

#[test]
fn test_duplicate_chan_id() {
	// Test that if a given peer tries to open a channel with the same channel_id as one that is
	// already open we reject it and keep the old channel.
	//
	// Previously, full_stack_target managed to figure out that if you tried to open two channels
	// with the same funding output (ie post-funding channel_id), we'd create a monitor update for
	// the existing channel when we detect the duplicate new channel, screwing up our monitor
	// updating logic for the existing channel.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Create an initial channel
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
	let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));

	// Try to create a second channel with the same temporary_channel_id as the first and check
	// that it is rejected.
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
	{
		let events = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		match events[0] {
			MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
				// Technically, at this point, nodes[1] would be justified in thinking both the
				// first (valid) and second (invalid) channels are closed, given they both have
				// the same non-temporary channel_id. However, currently we do not, so we just
				// move forward with it.
				assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
				assert_eq!(node_id, nodes[0].node.get_our_node_id());
			},
			_ => panic!("Unexpected event"),
		}
	}

	// Move the first channel through the funding flow...
	let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);

	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
	check_added_monitors!(nodes[0], 0);

	let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
	{
		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 1);
		assert_eq!(added_monitors[0].0, funding_output);
		added_monitors.clear();
	}
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());

	let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
	let channel_id = funding_outpoint.to_channel_id();
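	// (A sketch of what `to_channel_id` computes for v1 channels per BOLT 2 - the funding txid
	// with its last two bytes XORed with the big-endian funding output index. Hypothetical
	// standalone version, byte order matching the txid's serialized form:
	// fn v1_channel_id(txid_bytes: [u8; 32], index: u16) -> [u8; 32] {
	//	let mut id = txid_bytes;
	//	id[30] ^= (index >> 8) as u8;
	//	id[31] ^= (index & 0xff) as u8;
	//	id
	// })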

	// Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a
	// temporary one).

	// First try to open a second channel with a temporary channel id equal to the txid-based one.
	// Technically this is allowed by the spec, but we don't support it and there's little reason
	// to. Still, it shouldn't cause any other issues.
	open_chan_msg.temporary_channel_id = channel_id;
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
	{
		let events = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		match events[0] {
			MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
				// Technically, at this point, nodes[1] would be justified in thinking both
				// channels are closed, but currently we do not, so we just move forward with it.
				assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
				assert_eq!(node_id, nodes[0].node.get_our_node_id());
			},
			_ => panic!("Unexpected event"),
		}
	}

	// Now try to create a second channel which has a duplicate funding output.
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
	let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_2_msg);
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
	create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event

	let (_, funding_created) = {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		// Once we call `get_funding_created` the channel has the same channel_id as another
		// channel in the ChannelManager - an invalid state. Thus, we'd panic later when we try
		// to create another channel. Instead, we drop the channel entirely here, leaving the
		// ChannelManager in a possibly-nonsense state.
		match a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap() {
			ChannelPhase::UnfundedOutboundV1(chan) => {
				let logger = test_utils::TestLogger::new();
				chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap()
			},
			_ => panic!("Unexpected ChannelPhase variant"),
		}
	};
	check_added_monitors!(nodes[0], 0);
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created.unwrap());
	// At this point we'll look up if the channel_id is present and immediately fail the channel
	// without trying to persist the `ChannelMonitor`.
	check_added_monitors!(nodes[1], 0);

	// ...still, nodes[1] will reject the duplicate channel.
	{
		let events = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		match events[0] {
			MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
				// Technically, at this point, nodes[1] would be justified in thinking both
				// channels are closed, but currently we do not, so we just move forward with it.
				assert_eq!(msg.channel_id, channel_id);
				assert_eq!(node_id, nodes[0].node.get_our_node_id());
			},
			_ => panic!("Unexpected event"),
		}
	}

	// Finally, finish creating the original channel and send a payment over it to make sure
	// everything is functional.
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
	{
		let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 1);
		assert_eq!(added_monitors[0].0, funding_output);
		added_monitors.clear();
	}
	expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());

	let events_4 = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events_4.len(), 0);
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);

	let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
	let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
	update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);

	send_payment(&nodes[0], &[&nodes[1]], 8000000);
}

#[test]
fn test_error_chans_closed() {
	// Test that we properly handle error messages, closing appropriate channels.
	//
	// Prior to #787 we'd allow a peer to make us force-close a channel we had with a different
	// peer. The "real" fix for that is to index channels with peer_ids, however in the meantime
	// we can test various edge cases around it to ensure we don't regress.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	// Create some initial channels
	let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
	let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
	let chan_3 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001);

	assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
	assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
	assert_eq!(nodes[2].node.list_usable_channels().len(), 1);

	// Closing a channel from a different peer has no effect
	nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() });
	assert_eq!(nodes[0].node.list_usable_channels().len(), 3);

	// Closing one channel doesn't impact others
	nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
	check_added_monitors!(nodes[0], 1);
	check_closed_broadcast!(nodes[0], false);
	check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
		[nodes[1].node.get_our_node_id()], 100000);
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
	assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
	assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
	assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2);

	// A null channel ID should close all channels
	let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
	nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: ChannelId::new_zero(), data: "ERR".to_owned() });
	check_added_monitors!(nodes[0], 2);
	check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
		[nodes[1].node.get_our_node_id(); 2], 100000);
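	// Both just-closed channels should broadcast a `channel_update` with the `disable` bit set
	// (bit 1 of the flags, hence the `& 2` mask below), telling the network not to route through
	// them (per BOLT 7).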
	let events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 2);
	match events[0] {
		MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
			assert_eq!(msg.contents.flags & 2, 2);
		},
		_ => panic!("Unexpected event"),
	}
	match events[1] {
		MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
			assert_eq!(msg.contents.flags & 2, 2);
		},
		_ => panic!("Unexpected event"),
	}
	// Note that at this point users of a standard PeerHandler will end up calling
	// peer_disconnected.
	assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
	assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
	assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
}

#[test]
fn test_invalid_funding_tx() {
	// Test that we properly handle invalid funding transactions sent to us from a peer.
	//
	// Previously, all other major lightning implementations had failed to properly sanitize
	// funding transactions from their counterparties, leading to a multi-implementation critical
	// security vulnerability (though we always sanitized properly, we've previously had
	// un-released crashes in the sanitization process).
	//
	// Further, if the funding transaction is consensus-valid, confirms, and is later spent, we'd
	// previously have crashed in `ChannelMonitor` even though we closed the channel as bogus and
	// gave up on it. We test this here by generating such a transaction.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 10_000, 42, None, None).unwrap();
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));

	let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);

	// Create a witness program which can be spent by a 4-empty-stack-elements witness and which is
	// 136 bytes long. This matches our "accepted HTLC preimage spend" matching, previously causing
	// a panic as we'd try to extract a 32 byte preimage from a witness element without checking
	// its length.
	let wit_program: Vec<u8> = channelmonitor::deliberately_bogus_accepted_htlc_witness_program();
	let wit_program_script: ScriptBuf = wit_program.into();
	for output in tx.output.iter_mut() {
		// Make the confirmed funding transaction have a bogus script_pubkey
		output.script_pubkey = ScriptBuf::new_v0_p2wsh(&wit_program_script.wscript_hash());
	}
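	// (`new_v0_p2wsh` builds the standard `OP_0 <sha256(witness_script)>` output, so any spend
	// must reveal a witness script hashing to that value - here our deliberately bogus program.)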

	nodes[0].node.funding_transaction_generated_unchecked(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone(), 0).unwrap();
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
	check_added_monitors!(nodes[1], 1);
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
	check_added_monitors!(nodes[0], 1);
	expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());

	let events_1 = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events_1.len(), 0);

	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
	nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();

	let expected_err = "funding tx had wrong script/value or output index";
	confirm_transaction_at(&nodes[1], &tx, 1);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() },
		[nodes[0].node.get_our_node_id()], 100000);
	check_added_monitors!(nodes[1], 1);
	let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events_2.len(), 1);
	if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] {
		assert_eq!(*node_id, nodes[0].node.get_our_node_id());
		if let msgs::ErrorAction::DisconnectPeer { msg } = action {
			assert_eq!(msg.as_ref().unwrap().data, "Channel closed because of an exception: ".to_owned() + expected_err);
		} else { panic!(); }
	} else { panic!(); }
	assert_eq!(nodes[1].node.list_channels().len(), 0);

	// Now confirm a spend of the (bogus) funding transaction. As long as the witness is 5 elements
	// long the ChannelMonitor will try to read 32 bytes from the second-to-last element, panicking
	// as it's not 32 bytes long.
	let mut spend_tx = Transaction {
		version: 2i32, lock_time: LockTime::ZERO,
		input: tx.output.iter().enumerate().map(|(idx, _)| TxIn {
			previous_output: BitcoinOutPoint {
				txid: tx.txid(),
				vout: idx as u32,
			},
			script_sig: ScriptBuf::new(),
			sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
			witness: Witness::from_slice(&channelmonitor::deliberately_bogus_accepted_htlc_witness())
		}).collect(),
		output: vec![TxOut {
			value: 1000,
			script_pubkey: ScriptBuf::new(),
		}]
	};
	check_spends!(spend_tx, tx);
	mine_transaction(&nodes[1], &spend_tx);
}

#[test]
fn test_coinbase_funding_tx() {
	// Miners are able to fund channels directly from coinbase transactions, however
	// by consensus rules, outputs of a coinbase transaction are encumbered by a 100-block
	// maturity timelock. To ensure that a (non-0conf) channel like this is enforceable
	// on-chain, the minimum depth is updated to 100 blocks for coinbase funding transactions.
	//
	// Note that 0conf channels with coinbase funding transactions are unaffected and are
	// immediately operational after opening.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
	let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
	let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());

	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);

	// Create the coinbase funding transaction.
	let (temporary_channel_id, tx, _) = create_coinbase_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);

	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
	check_added_monitors!(nodes[0], 0);
	let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
	check_added_monitors!(nodes[1], 1);
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());

	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
	check_added_monitors!(nodes[0], 1);

	expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());

	// Starting at height 0, we "confirm" the coinbase at height 1.
	confirm_transaction_at(&nodes[0], &tx, 1);
	// We connect 98 more blocks to have 99 confirmations for the coinbase transaction.
	connect_blocks(&nodes[0], COINBASE_MATURITY - 2);
	// Check that we have no pending message events (we have not queued a `channel_ready` yet).
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	// Now connect one more block which results in 100 confirmations of the coinbase transaction.
	connect_blocks(&nodes[0], 1);
	// There should now be a `channel_ready` which can be handled.
	nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));

	confirm_transaction_at(&nodes[1], &tx, 1);
	connect_blocks(&nodes[1], COINBASE_MATURITY - 2);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	connect_blocks(&nodes[1], 1);
	expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
	create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
}

fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock: bool) {
	// In the first version of the chain::Confirm interface, after a refactor was made to not
	// broadcast CSV-locked transactions until their CSV lock is up, we wouldn't reliably broadcast
	// transactions after a `transactions_confirmed` call. Specifically, if the chain, provided via
	// `best_block_updated`, is at height N, and a transaction output which we wish to spend at
	// height N-1 (due to a CSV to height N-1) is provided at height N, we would not broadcast the
	// spending transaction until height N+1 (or greater). This was due to the way
	// `ChannelMonitor::transactions_confirmed` worked, only checking if we should broadcast a
	// spending transaction at the height the input transaction was confirmed at, not whether we
	// should broadcast a spending transaction at the current height.
	// A second, similar, issue involved failing HTLCs backwards - because we only provided the
	// height at which transactions were confirmed to `OnchainTx::update_claims_view`, it wasn't
	// aware that the anti-reorg-delay had, in fact, already expired, waiting to fail-backwards
	// until we learned about an additional block.
	//
	// As an additional check, if `test_height_before_timelock` is set, we instead test that we
	// aren't broadcasting transactions too early (ie not broadcasting them at all).
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	*nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;

	create_announced_chan_between_nodes(&nodes, 0, 1);
	let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
	let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());

	nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
	check_closed_broadcast!(nodes[1], true);
	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
	check_added_monitors!(nodes[1], 1);
	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
	assert_eq!(node_txn.len(), 1);

	let conf_height = nodes[1].best_block_info().1;
	if !test_height_before_timelock {
		connect_blocks(&nodes[1], 24 * 6);
	}
	nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
		&nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height);
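	// Note that `transactions_confirmed` is deliberately given `conf_height` even though, in the
	// `!test_height_before_timelock` case, the best block is now 144 blocks past it; the behavior
	// under test is that timelocks are evaluated against the current height rather than the
	// (possibly stale) confirmation height.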
	if test_height_before_timelock {
		// If we confirmed the close transaction, but timelocks have not yet expired, we should not
		// generate any events or broadcast any transactions
		assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
		assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
	} else {
		// We should broadcast an HTLC transaction spending our funding transaction first
		let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
		assert_eq!(spending_txn.len(), 2);
		assert_eq!(spending_txn[0].txid(), node_txn[0].txid());
		check_spends!(spending_txn[1], node_txn[0]);
		// We should also generate a SpendableOutputs event with the to_self output (as its
		// timelock is up).
		let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
		assert_eq!(descriptor_spend_txn.len(), 1);

		// If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we
		// should immediately fail-backwards the HTLC to the previous hop, without waiting for an
		// additional block built on top of the current chain.
		nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
			&nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
		check_added_monitors!(nodes[1], 1);

		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		assert!(updates.update_add_htlcs.is_empty());
		assert!(updates.update_fulfill_htlcs.is_empty());
		assert_eq!(updates.update_fail_htlcs.len(), 1);
		assert!(updates.update_fail_malformed_htlcs.is_empty());
		assert!(updates.update_fee.is_none());
		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
		expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_announce.contents.short_channel_id, true);
	}
}

#[test]
fn test_tx_confirmed_skipping_blocks_immediate_broadcast() {
	do_test_tx_confirmed_skipping_blocks_immediate_broadcast(false);
	do_test_tx_confirmed_skipping_blocks_immediate_broadcast(true);
}

fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);

	let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
		.with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
	let route = get_route!(nodes[0], payment_params, 10_000).unwrap();

	let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]);

	{
		nodes[0].node.send_payment_with_route(&route, our_payment_hash,
			RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		let mut payment_event = SendEvent::from_event(events.pop().unwrap());
		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
	}
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 10_000);

	{
		// Note that we use a different PaymentId here to allow us to pay duplicatively
		nodes[0].node.send_payment_with_route(&route, our_payment_hash,
			RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_secret.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		let mut payment_event = SendEvent::from_event(events.pop().unwrap());
		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
		// At this point, nodes[1] would notice it has too much value for the payment. It will
		// assume the second is a privacy attack (no longer particularly relevant
		// post-payment_secrets) and fail back the new HTLC. Previously, it'd also have failed back
		// the first HTLC delivered above.
	}

	expect_pending_htlcs_forwardable_ignore!(nodes[1]);
	nodes[1].node.process_pending_htlc_forwards();

	if test_for_second_fail_panic {
		// Now we go fail back the first HTLC from the user end.
		nodes[1].node.fail_htlc_backwards(&our_payment_hash);

		let expected_destinations = vec![
			HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
			HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
		];
		expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations);
		nodes[1].node.process_pending_htlc_forwards();

		check_added_monitors!(nodes[1], 1);
		let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2);

		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]);
		commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);

		let failure_events = nodes[0].node.get_and_clear_pending_events();
		assert_eq!(failure_events.len(), 4);
		if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); }
		if let Event::PaymentFailed { .. } = failure_events[1] {} else { panic!(); }
		if let Event::PaymentPathFailed { .. } = failure_events[2] {} else { panic!(); }
		if let Event::PaymentFailed { .. } = failure_events[3] {} else { panic!(); }
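		// (Two `PaymentPathFailed`/`PaymentFailed` pairs: the two sends above used distinct
		// `PaymentId`s, so each registered as its own pending payment and fails independently.)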
	} else {
		// Let the second HTLC fail and claim the first
		expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
		nodes[1].node.process_pending_htlc_forwards();

		check_added_monitors!(nodes[1], 1);
		let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
		commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);

		expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new());

		claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
	}
}

#[test]
fn test_dup_htlc_second_fail_panic() {
	// Previously, if we received two HTLCs back-to-back, where the second overran the expected
	// value for the payment, we'd fail back both HTLCs after generating a `PaymentClaimable` event.
	// Then, if the user failed the second payment, they'd hit a "tried to fail an already failed
	// HTLC" debug panic. This tests for this behavior, checking that only one HTLC is auto-failed.
	do_test_dup_htlc_second_rejected(true);
}

#[test]
fn test_dup_htlc_second_rejected() {
	// Test that if we receive a second HTLC for an MPP payment that overruns the payment amount we
	// simply reject the second HTLC but are still able to claim the first HTLC.
	do_test_dup_htlc_second_rejected(false);
}

#[test]
fn test_inconsistent_mpp_params() {
	// Test that if we receive two HTLCs with different payment parameters we fail back the first
	// such HTLC and allow the second to stay.
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
	create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
	create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
	let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);

	let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
		.with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap();
	let mut route = get_route!(nodes[0], payment_params, 15_000_000).unwrap();
	assert_eq!(route.paths.len(), 2);
	route.paths.sort_by(|path_a, _| {
		// Sort the paths so that the path through nodes[1] comes first
		if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
			core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
	});
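	// (The comparator above only inspects `path_a`; with exactly two paths that is enough to pin
	// the nodes[1] path to index 0 and the nodes[2] path to index 1.)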

	let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]);

	let cur_height = nodes[0].best_block_info().1;
	let payment_id = PaymentId([42; 32]);

	let session_privs = {
		// We create a fake route here so that we start with three pending HTLCs, which we'll
		// ultimately have, just not right away.
		let mut dup_route = route.clone();
		dup_route.paths.push(route.paths[1].clone());
		nodes[0].node.test_add_new_pending_payment(our_payment_hash,
			RecipientOnionFields::secret_only(our_payment_secret), payment_id, &dup_route).unwrap()
	};
	nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id,
		&None, session_privs[0]).unwrap();
	check_added_monitors!(nodes[0], 1);

	{
		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), false, None);
	}
	assert!(nodes[3].node.get_and_clear_pending_events().is_empty());

	nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), 14_000_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
	check_added_monitors!(nodes[0], 1);

	{
		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		let payment_event = SendEvent::from_event(events.pop().unwrap());

		nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
		commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false);

		expect_pending_htlcs_forwardable!(nodes[2]);
		check_added_monitors!(nodes[2], 1);

		let mut events = nodes[2].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		let payment_event = SendEvent::from_event(events.pop().unwrap());

		nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
		check_added_monitors!(nodes[3], 0);
		commitment_signed_dance!(nodes[3], nodes[2], payment_event.commitment_msg, true, true);

		// At this point, nodes[3] should notice the two HTLCs don't contain the same total payment
		// amount. It will assume the second is a privacy attack (no longer particularly relevant
		// post-payment_secrets) and fail back the new HTLC.
	}
	expect_pending_htlcs_forwardable_ignore!(nodes[3]);
	nodes[3].node.process_pending_htlc_forwards();
	expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
	nodes[3].node.process_pending_htlc_forwards();

	check_added_monitors!(nodes[3], 1);

	let fail_updates_1 = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
	nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);

	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]);
	check_added_monitors!(nodes[2], 1);

	let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_updates_2.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false);

	expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());

	nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id,
		&None, session_privs[2]).unwrap();
	check_added_monitors!(nodes[0], 1);

	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);

	do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage);
	expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true, true);
}

#[test]
fn test_double_partial_claim() {
	// Test what happens if a node receives a payment, generates a PaymentClaimable event, the HTLCs
	// time out, the sender resends only some of the MPP parts, then the user processes the
	// PaymentClaimable event, ensuring they don't inadvertently claim only part of the full payment
	// amount.
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
	create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
	create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
	create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);

	let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000);
	assert_eq!(route.paths.len(), 2);
	route.paths.sort_by(|path_a, _| {
		// Sort the paths so that the path through nodes[1] comes first
		if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
			core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
	});

	send_along_route_with_secret(&nodes[0], route.clone(), &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 15_000_000, payment_hash, payment_secret);
	// nodes[3] has now received a PaymentClaimable event...which it will take some (exorbitant)
	// amount of time to respond to.

	// Connect some blocks to time out the payment
	connect_blocks(&nodes[3], TEST_FINAL_CLTV);
	connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later

	let failed_destinations = vec![
		HTLCDestination::FailedPayment { payment_hash },
		HTLCDestination::FailedPayment { payment_hash },
	];
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations);

	pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash, PaymentFailureReason::RecipientRejected);

	// nodes[0] now retries one of the two paths...
	nodes[0].node.send_payment_with_route(&route, payment_hash,
		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 2);

	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 2);
	let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
	pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, false, None);

	// At this point nodes[3] has received one half of the payment, and the user goes to handle
	// that PaymentClaimable event they got hours ago and never handled...we should refuse to claim.
	nodes[3].node.claim_funds(payment_preimage);
	check_added_monitors!(nodes[3], 0);
	assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
}

/// The possible events which may trigger a `max_dust_htlc_exposure` breach
#[derive(Clone, Copy, PartialEq)]
enum ExposureEvent {
	/// Breach occurs at HTLC forwarding (see `send_htlc`)
	AtHTLCForward,
	/// Breach occurs at HTLC reception (see `update_add_htlc`)
	AtHTLCReception,
	/// Breach occurs at outbound update_fee (see `send_update_fee`)
	AtUpdateFeeOutbound,
}

fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool, multiplier_dust_limit: bool) {
	// Test that we properly reject dust HTLCs violating our `max_dust_htlc_exposure_msat`
	// policy.
	//
	// At HTLC forward (`send_payment()`), if the sum of the trimmed-to-dust HTLC inbound and
	// trimmed-to-dust HTLC outbound balance and this new payment as included on next
	// counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll reject the
	// update. At HTLC reception (`update_add_htlc()`), if the sum of the trimmed-to-dust HTLC
	// inbound and trimmed-to-dust HTLC outbound balance and this new received HTLC as included
	// on next counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll fail
	// the update. Note that we return a `temporary_channel_failure` (0x1000 | 7), as the channel
	// might be available again for HTLC processing once the dust bandwidth has cleared up.

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let mut config = test_default_channel_config();
	config.channel_config.max_dust_htlc_exposure = if multiplier_dust_limit {
		// Default test fee estimator rate is 253 sat/kw, so we set the multiplier to 5_000_000 / 253
		// to get roughly the same initial value as the default setting when this test was
		// originally written.
		MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253)
	} else { MaxDustHTLCExposure::FixedLimitMsat(5_000_000) }; // initial default setting value
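	// (For intuition: with the 253 sat/kw test feerate, the multiplier branch caps exposure at
	// roughly 253 * (5_000_000 / 253) = 253 * 19_762 = 4_999_786 msat, i.e. effectively the same
	// 5_000_000 msat cap as the fixed-limit branch - assuming the multiplier is applied linearly
	// to the feerate, as the comment above implies.)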
9691         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9692         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
9693         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9694
9695         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
9696         let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9697         open_channel.max_htlc_value_in_flight_msat = 50_000_000;
9698         open_channel.max_accepted_htlcs = 60;
9699         if on_holder_tx {
9700                 open_channel.dust_limit_satoshis = 546;
9701         }
9702         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
9703         let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9704         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
9705
9706         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9707
9708         let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
9709
9710         if on_holder_tx {
9711                 let mut node_0_per_peer_lock;
9712                 let mut node_0_peer_state_lock;
9713                 match get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id) {
9714                         ChannelPhase::UnfundedOutboundV1(chan) => {
9715                                 chan.context.holder_dust_limit_satoshis = 546;
9716                         },
9717                         _ => panic!("Unexpected ChannelPhase variant"),
9718                 }
9719         }
9720
9721         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9722         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
9723         check_added_monitors!(nodes[1], 1);
9724         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9725
9726         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
9727         check_added_monitors!(nodes[0], 1);
9728         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9729
9730         let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
9731         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
9732         update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
9733
9734         // Fetch a route in advance, as fetching one will fail once we're unable to send.
9735         let (mut route, payment_hash, _, payment_secret) =
9736                 get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
9737
9738         let (dust_buffer_feerate, max_dust_htlc_exposure_msat) = {
9739                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
9740                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
9741                 let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
9742                 (chan.context().get_dust_buffer_feerate(None) as u64,
9743                 chan.context().get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
9744         };
9745         let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
9746         let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
9747
9748         let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
9749         let dust_inbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
9750
9751         let dust_htlc_on_counterparty_tx: u64 = 4;
9752         let dust_htlc_on_counterparty_tx_msat: u64 = max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx;
9753
9754         if on_holder_tx {
9755                 if dust_outbound_balance {
9756                         // Outbound dust threshold: 2223 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
9757                         // Outbound dust balance: 4372 sats
9758                         // Note that the sent payment must also be above the outbound dust threshold on the counterparty tx (2132 sats)
9759                         for _ in 0..dust_outbound_htlc_on_holder_tx {
9760                                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_outbound_htlc_on_holder_tx_msat);
9761                                 nodes[0].node.send_payment_with_route(&route, payment_hash,
9762                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9763                         }
9764                 } else {
9765                         // Inbound dust threshold: 2324 sats (`dust_buffer_feerate` * HTLC_SUCCESS_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
9766                         // Inbound dust balance: 4372 sats
9767                         // Note that the sent payment must also be above the outbound dust threshold on the counterparty tx (2031 sats)
9768                         for _ in 0..dust_inbound_htlc_on_holder_tx {
9769                                 route_payment(&nodes[1], &[&nodes[0]], dust_inbound_htlc_on_holder_tx_msat);
9770                         }
9771                 }
9772         } else {
9773                 if dust_outbound_balance {
9774                         // Outbound dust threshold: 2132 sats (`dust_buffer_feerate` * HTLC_SUCCESS_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
9775                         // Outbound dust balance: 5000 sats
9776                         for _ in 0..dust_htlc_on_counterparty_tx - 1 {
9777                                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_htlc_on_counterparty_tx_msat);
9778                                 nodes[0].node.send_payment_with_route(&route, payment_hash,
9779                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9780                         }
9781                 } else {
9782                         // Inbound dust threshold: 2031 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
9783                         // Inbound dust balance: 5000 sats
9784                         for _ in 0..dust_htlc_on_counterparty_tx - 1 {
9785                                 route_payment(&nodes[1], &[&nodes[0]], dust_htlc_on_counterparty_tx_msat);
9786                         }
9787                 }
9788         }
9789
9790         if exposure_breach_event == ExposureEvent::AtHTLCForward {
9791                 route.paths[0].hops.last_mut().unwrap().fee_msat =
9792                         if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 };
9793                 // With default dust exposure: 5000 sats
9794                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
9795                                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
9796                         ), true, APIError::ChannelUnavailable { .. }, {});
9803         } else if exposure_breach_event == ExposureEvent::AtHTLCReception {
9804                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 4 });
9805                 nodes[1].node.send_payment_with_route(&route, payment_hash,
9806                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9807                 check_added_monitors!(nodes[1], 1);
9808                 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
9809                 assert_eq!(events.len(), 1);
9810                 let payment_event = SendEvent::from_event(events.remove(0));
9811                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
9812                 // With default dust exposure: 5000 sats
9813                 if on_holder_tx {
9814                         // Outbound dust balance: 6399 sats
9815                         let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
9816                         let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
9817                         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
9818                 } else {
9819                         // Outbound dust balance: 5200 sats
9820                         nodes[0].logger.assert_log("lightning::ln::channel".to_string(),
9821                                 format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
9822                                         dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx - 1) + dust_htlc_on_counterparty_tx_msat + 4,
9823                                         max_dust_htlc_exposure_msat), 1);
9824                 }
9825         } else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
9826                 route.paths[0].hops.last_mut().unwrap().fee_msat = 2_500_000;
9827                 // For the multiplier dust exposure limit, since it scales with feerate,
9828                 // we need to add a lot of HTLCs that will become dust at the new feerate
9829                 // to cross the threshold.
9830                 for _ in 0..20 {
9831                         let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(1_000), None);
9832                         nodes[0].node.send_payment_with_route(&route, payment_hash,
9833                                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9834                 }
9835                 {
9836                         let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9837                         *feerate_lock = *feerate_lock * 10;
9838                 }
9839                 nodes[0].node.timer_tick_occurred();
9840                 check_added_monitors!(nodes[0], 1);
9841                 nodes[0].logger.assert_log_contains("lightning::ln::channel", "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure", 1);
9842         }
9843
9844         let _ = nodes[0].node.get_and_clear_pending_msg_events();
9845         let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
9846         added_monitors.clear();
9847 }
9848
9849 fn do_test_max_dust_htlc_exposure_by_threshold_type(multiplier_dust_limit: bool) {
9850         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit);
9851         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit);
9852         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit);
9853         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit);
9854         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit);
9855         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit);
9856         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit);
9857         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit);
9858         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit);
9859         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit);
9860         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit);
9861         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit);
9862 }
9863
9864 #[test]
9865 fn test_max_dust_htlc_exposure() {
9866         do_test_max_dust_htlc_exposure_by_threshold_type(false);
9867         do_test_max_dust_htlc_exposure_by_threshold_type(true);
9868 }
9869
9870 #[test]
9871 fn test_non_final_funding_tx() {
9872         let chanmon_cfgs = create_chanmon_cfgs(2);
9873         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9874         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9875         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9876
9877         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
9878         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9879         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
9880         let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9881         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
9882
9883         let best_height = nodes[0].node.best_block.read().unwrap().height();
9884
9885         let chan_id = *nodes[0].network_chan_count.borrow();
9886         let events = nodes[0].node.get_and_clear_pending_events();
9887         let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::ScriptBuf::new(), sequence: Sequence(1), witness: Witness::from_slice(&[&[1]]) };
9888         assert_eq!(events.len(), 1);
9889         let mut tx = match events[0] {
9890                 Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
9891                         // Timelock the transaction _beyond_ the best client height + 1.
9892                         Transaction { version: chan_id as i32, lock_time: LockTime::from_height(best_height + 2).unwrap(), input: vec![input], output: vec![TxOut {
9893                                 value: *channel_value_satoshis, script_pubkey: output_script.clone(),
9894                         }]}
9895                 },
9896                 _ => panic!("Unexpected event"),
9897         };
9898         // Transaction should fail as it's evaluated as non-final for propagation.
9899         match nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()) {
9900                 Err(APIError::APIMisuseError { err }) => {
9901                         assert_eq!(format!("Funding transaction absolute timelock is non-final"), err);
9902                 },
9903                 _ => panic!()
9904         }
9905         let events = nodes[0].node.get_and_clear_pending_events();
9906         assert_eq!(events.len(), 1);
9907         match events[0] {
9908                 Event::ChannelClosed { channel_id, .. } => {
9909                         assert_eq!(channel_id, temp_channel_id);
9910                 },
9911                 _ => panic!("Unexpected event"),
9912         }
9913 }
9914
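// A minimal sketch (ours, not an LDK API) of the headroom rule the two tests around it
// exercise: a height-locked funding transaction is only considered broadcastable when its
// locktime does not exceed the next block's height (best height + 1). `LockTime` here is
// the `bitcoin` crate's absolute locktime, already imported at the top of this file.
#[test]
fn funding_locktime_headroom_sketch() {
        let best_height: u32 = 100;
        // Assumed simplification of the check: height locks compare against the next block.
        let broadcastable = |lock: LockTime| match lock {
                LockTime::Blocks(height) => height.to_consensus_u32() <= best_height + 1,
                LockTime::Seconds(_) => true, // time-based locks are out of scope here
        };
        // Within the +1 headroom: accepted, as in `test_non_final_funding_tx_within_headroom`.
        assert!(broadcastable(LockTime::from_height(best_height + 1).unwrap()));
        // Beyond the headroom: rejected as non-final, as in `test_non_final_funding_tx`.
        assert!(!broadcastable(LockTime::from_height(best_height + 2).unwrap()));
}
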
9915 #[test]
9916 fn test_non_final_funding_tx_within_headroom() {
9917         let chanmon_cfgs = create_chanmon_cfgs(2);
9918         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9919         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9920         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9921
9922         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
9923         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9924         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
9925         let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9926         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
9927
9928         let best_height = nodes[0].node.best_block.read().unwrap().height();
9929
9930         let chan_id = *nodes[0].network_chan_count.borrow();
9931         let events = nodes[0].node.get_and_clear_pending_events();
9932         let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::ScriptBuf::new(), sequence: Sequence(1), witness: Witness::from_slice(&[[1]]) };
9933         assert_eq!(events.len(), 1);
9934         let mut tx = match events[0] {
9935                 Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
9936                         // Timelock the transaction within a +1 headroom from the best block.
9937                         Transaction { version: chan_id as i32, lock_time: LockTime::from_consensus(best_height + 1), input: vec![input], output: vec![TxOut {
9938                                 value: *channel_value_satoshis, script_pubkey: output_script.clone(),
9939                         }]}
9940                 },
9941                 _ => panic!("Unexpected event"),
9942         };
9943
9944         // Transaction should be accepted if it's in a +1 headroom from best block.
9945         assert!(nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
9946         get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9947 }
9948
9949 #[test]
9950 fn accept_busted_but_better_fee() {
9951         // If a peer sends us a fee update that is too low, but still higher than our previous
9952         // channel feerate, we should accept it. In the future we may want to close such a
9953         // channel, but for now we simply accept the update.
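        // Simplified model (ours, not the actual channel code) of the policy this test
        // exercises: a counterparty's new feerate is tolerated when it is at or above our
        // lower bound, or when it at least increases; a decrease while still under our
        // bound closes the channel.
        let tolerate_update = |prev: u32, new: u32, our_lower_bound: u32| {
                new >= our_lower_bound || new > prev
        };
        assert!(tolerate_update(253, 1000, 5000)); // busted but better than before: accept
        assert!(tolerate_update(1000, 2000, 5000)); // still busted, still better: accept
        assert!(!tolerate_update(2000, 1000, 5000)); // decrease below our bound: close
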
9954         let mut chanmon_cfgs = create_chanmon_cfgs(2);
9955         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9956         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9957         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9958
9959         create_chan_between_nodes(&nodes[0], &nodes[1]);
9960
9961         // Set nodes[1] to expect 5,000 sat/kW.
9962         {
9963                 let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
9964                 *feerate_lock = 5000;
9965         }
9966
9967         // If nodes[0] increases their feerate, even if it's not enough, nodes[1] should accept it.
9968         {
9969                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9970                 *feerate_lock = 1000;
9971         }
9972         nodes[0].node.timer_tick_occurred();
9973         check_added_monitors!(nodes[0], 1);
9974
9975         let events = nodes[0].node.get_and_clear_pending_msg_events();
9976         assert_eq!(events.len(), 1);
9977         match events[0] {
9978                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
9979                         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
9980                         commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
9981                 },
9982                 _ => panic!("Unexpected event"),
9983         };
9984
9985         // If nodes[0] increases their feerate further, even if it's not enough, nodes[1] should
9986         // accept it.
9987         {
9988                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9989                 *feerate_lock = 2000;
9990         }
9991         nodes[0].node.timer_tick_occurred();
9992         check_added_monitors!(nodes[0], 1);
9993
9994         let events = nodes[0].node.get_and_clear_pending_msg_events();
9995         assert_eq!(events.len(), 1);
9996         match events[0] {
9997                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
9998                         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
9999                         commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
10000                 },
10001                 _ => panic!("Unexpected event"),
10002         };
10003
10004         // However, if nodes[0] decreases their feerate, nodes[1] should reject it and close the
10005         // channel.
10006         {
10007                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10008                 *feerate_lock = 1000;
10009         }
10010         nodes[0].node.timer_tick_occurred();
10011         check_added_monitors!(nodes[0], 1);
10012
10013         let events = nodes[0].node.get_and_clear_pending_msg_events();
10014         assert_eq!(events.len(), 1);
10015         match events[0] {
10016                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
10017                         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
10018                         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError {
10019                                 err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000".to_owned() },
10020                                 [nodes[0].node.get_our_node_id()], 100000);
10021                         check_closed_broadcast!(nodes[1], true);
10022                         check_added_monitors!(nodes[1], 1);
10023                 },
10024                 _ => panic!("Unexpected event"),
10025         };
10026 }
10027
10028 fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash: bool) {
10029         let mut chanmon_cfgs = create_chanmon_cfgs(2);
10030         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10031         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10032         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10033         let min_final_cltv_expiry_delta = 120;
10034         let final_cltv_expiry_delta = if valid_delta { min_final_cltv_expiry_delta + 2 } else {
10035                 min_final_cltv_expiry_delta - 2 };
10036         let recv_value = 100_000;
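        // Ours, simplified: the recipient fails back an HTLC whose final-hop CLTV delta falls
        // short of the invoice's `min_final_cltv_expiry_delta`; `valid_delta` toggles which
        // side of that line the route below lands on.
        let meets_policy = |delta| delta >= min_final_cltv_expiry_delta;
        assert_eq!(meets_policy(final_cltv_expiry_delta), valid_delta);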
10037
10038         create_chan_between_nodes(&nodes[0], &nodes[1]);
10039
10040         let payment_parameters = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), final_cltv_expiry_delta as u32);
10041         let (payment_hash, payment_preimage, payment_secret) = if use_user_hash {
10042                 let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1],
10043                         Some(recv_value), Some(min_final_cltv_expiry_delta));
10044                 (payment_hash, payment_preimage, payment_secret)
10045         } else {
10046                 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(recv_value), 7200, Some(min_final_cltv_expiry_delta)).unwrap();
10047                 (payment_hash, nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap(), payment_secret)
10048         };
10049         let route = get_route!(nodes[0], payment_parameters, recv_value).unwrap();
10050         nodes[0].node.send_payment_with_route(&route, payment_hash,
10051                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10052         check_added_monitors!(nodes[0], 1);
10053         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
10054         assert_eq!(events.len(), 1);
10055         let mut payment_event = SendEvent::from_event(events.pop().unwrap());
10056         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
10057         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
10058         expect_pending_htlcs_forwardable!(nodes[1]);
10059
10060         if valid_delta {
10061                 expect_payment_claimable!(nodes[1], payment_hash, payment_secret, recv_value, if use_user_hash {
10062                         None } else { Some(payment_preimage) }, nodes[1].node.get_our_node_id());
10063
10064                 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
10065         } else {
10066                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
10067
10068                 check_added_monitors!(nodes[1], 1);
10069
10070                 let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
10071                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates.update_fail_htlcs[0]);
10072                 commitment_signed_dance!(nodes[0], nodes[1], fail_updates.commitment_signed, false, true);
10073
10074                 expect_payment_failed!(nodes[0], payment_hash, true);
10075         }
10076 }
10077
10078 #[test]
10079 fn test_payment_with_custom_min_cltv_expiry_delta() {
10080         do_payment_with_custom_min_final_cltv_expiry(false, false);
10081         do_payment_with_custom_min_final_cltv_expiry(false, true);
10082         do_payment_with_custom_min_final_cltv_expiry(true, false);
10083         do_payment_with_custom_min_final_cltv_expiry(true, true);
10084 }
10085
10086 #[test]
10087 fn test_disconnects_peer_awaiting_response_ticks() {
10088         // Tests that nodes which are awaiting a response critical for channel responsiveness
10089         // disconnect their counterparty after `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10090         let mut chanmon_cfgs = create_chanmon_cfgs(2);
10091         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10092         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10093         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10094
10095         // Asserts a disconnect event is queued to the user.
10096         let check_disconnect_event = |node: &Node, should_disconnect: bool| {
10097                 let disconnect_event = node.node.get_and_clear_pending_msg_events().iter().find_map(|event|
10098                         if let MessageSendEvent::HandleError { action, .. } = event {
10099                                 if let msgs::ErrorAction::DisconnectPeerWithWarning { .. } = action {
10100                                         Some(())
10101                                 } else {
10102                                         None
10103                                 }
10104                         } else {
10105                                 None
10106                         }
10107                 );
10108                 assert_eq!(disconnect_event.is_some(), should_disconnect);
10109         };
10110
10111         // Fires timer ticks ensuring we only attempt to disconnect peers after reaching
10112         // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10113         let check_disconnect = |node: &Node| {
10114                 // No disconnect without any timer ticks.
10115                 check_disconnect_event(node, false);
10116
10117                 // No disconnect with 1 timer tick less than required.
10118                 for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS - 1 {
10119                         node.node.timer_tick_occurred();
10120                         check_disconnect_event(node, false);
10121                 }
10122
10123                 // Disconnect after reaching the required ticks.
10124                 node.node.timer_tick_occurred();
10125                 check_disconnect_event(node, true);
10126
10127                 // Disconnect again on the next tick if the peer hasn't been disconnected yet.
10128                 node.node.timer_tick_occurred();
10129                 check_disconnect_event(node, true);
10130         };
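
        // A toy model (ours) of the counter driving the closures above: a peer awaiting a
        // critical response is disconnected only once `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`
        // ticks elapse without progress.
        {
                let mut ticks_waited = 0;
                let should_disconnect = |ticks| ticks >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS;
                for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
                        assert!(!should_disconnect(ticks_waited));
                        ticks_waited += 1;
                }
                assert!(should_disconnect(ticks_waited));
        }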
10131
10132         create_chan_between_nodes(&nodes[0], &nodes[1]);
10133
10134         // We'll start by performing a fee update with Alice (nodes[0]) on the channel.
10135         *nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2;
10136         nodes[0].node.timer_tick_occurred();
10137         check_added_monitors!(&nodes[0], 1);
10138         let alice_fee_update = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
10139         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), alice_fee_update.update_fee.as_ref().unwrap());
10140         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &alice_fee_update.commitment_signed);
10141         check_added_monitors!(&nodes[1], 1);
10142
10143         // This will prompt Bob (nodes[1]) to respond with his `CommitmentSigned` and `RevokeAndACK`.
10144         let (bob_revoke_and_ack, bob_commitment_signed) = get_revoke_commit_msgs!(&nodes[1], nodes[0].node.get_our_node_id());
10145         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revoke_and_ack);
10146         check_added_monitors!(&nodes[0], 1);
10147         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_commitment_signed);
10148         check_added_monitors(&nodes[0], 1);
10149
10150         // Alice then needs to send her final `RevokeAndACK` to complete the commitment dance. We
10151         // pretend Bob hasn't received the message and check whether he'll disconnect Alice after
10152         // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10153         let alice_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
10154         check_disconnect(&nodes[1]);
10155
10156         // Now, we'll reconnect them to test awaiting a `ChannelReestablish` message.
10157         //
10158         // Note that since the commitment dance didn't complete above, Alice is expected to resend her
10159         // final `RevokeAndACK` to Bob to complete it.
10160         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
10161         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10162         let bob_init = msgs::Init {
10163                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
10164         };
10165         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &bob_init, true).unwrap();
10166         let alice_init = msgs::Init {
10167                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
10168         };
10169         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &alice_init, true).unwrap();
10170
10171         // Upon reconnection, Alice sends her `ChannelReestablish` to Bob. Alice, however, hasn't
10172         // received Bob's yet, so she should disconnect him after reaching
10173         // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10174         let alice_channel_reestablish = get_event_msg!(
10175                 nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()
10176         );
10177         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &alice_channel_reestablish);
10178         check_disconnect(&nodes[0]);
10179
10180         // Bob now sends his `ChannelReestablish` to Alice to resume the channel and consider it "live".
10181         let bob_channel_reestablish = nodes[1].node.get_and_clear_pending_msg_events().iter().find_map(|event|
10182                 if let MessageSendEvent::SendChannelReestablish { node_id, msg } = event {
10183                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
10184                         Some(msg.clone())
10185                 } else {
10186                         None
10187                 }
10188         ).unwrap();
10189         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bob_channel_reestablish);
10190
10191         // Sanity check that Alice won't disconnect Bob since she's no longer waiting for any messages.
10192         for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
10193                 nodes[0].node.timer_tick_occurred();
10194                 check_disconnect_event(&nodes[0], false);
10195         }
10196
10197         // However, Bob is still waiting on Alice's `RevokeAndACK`, so he should disconnect her after
10198         // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10199         check_disconnect(&nodes[1]);
10200
10201         // Finally, have Bob process the last message.
10202         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &alice_revoke_and_ack);
10203         check_added_monitors(&nodes[1], 1);
10204
10205         // At this point, neither node should attempt to disconnect each other, since they aren't
10206         // waiting on any messages.
10207         for node in &nodes {
10208                 for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
10209                         node.node.timer_tick_occurred();
10210                         check_disconnect_event(node, false);
10211                 }
10212         }
10213 }
10214
10215 #[test]
10216 fn test_remove_expired_outbound_unfunded_channels() {
10217         let chanmon_cfgs = create_chanmon_cfgs(2);
10218         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10219         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10220         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10221
10222         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10223         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10224         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10225         let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10226         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10227
10228         let events = nodes[0].node.get_and_clear_pending_events();
10229         assert_eq!(events.len(), 1);
10230         match events[0] {
10231                 Event::FundingGenerationReady { .. } => (),
10232                 _ => panic!("Unexpected event"),
10233         };
10234
10235         // Asserts that the outbound channel has been removed from nodes[0]'s peer state map.
10236         let check_outbound_channel_existence = |should_exist: bool| {
10237                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
10238                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
10239                 assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
10240         };
10241
10242         // Channel should exist without any timer ticks.
10243         check_outbound_channel_existence(true);
10244
10245         // Channel should exist with 1 timer tick less than required.
10246         for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
10247                 nodes[0].node.timer_tick_occurred();
10248                 check_outbound_channel_existence(true);
10249         }
10250
10251         // Remove channel after reaching the required ticks.
10252         nodes[0].node.timer_tick_occurred();
10253         check_outbound_channel_existence(false);
10254
10255         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
10256         assert_eq!(msg_events.len(), 1);
10257         match msg_events[0] {
10258                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
10259                         assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
10260                 },
10261                 _ => panic!("Unexpected event"),
10262         }
10263         check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
10264 }
10265
10266 #[test]
10267 fn test_remove_expired_inbound_unfunded_channels() {
10268         let chanmon_cfgs = create_chanmon_cfgs(2);
10269         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10270         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10271         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10272
10273         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10274         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10275         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10276         let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10277         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10278
10279         let events = nodes[0].node.get_and_clear_pending_events();
10280         assert_eq!(events.len(), 1);
10281         match events[0] {
10282                 Event::FundingGenerationReady { .. } => (),
10283                 _ => panic!("Unexpected event"),
10284         };
10285
10286         // Asserts that the inbound channel has been removed from nodes[1]'s peer state map.
10287         let check_inbound_channel_existence = |should_exist: bool| {
10288                 let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
10289                 let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
10290                 assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
10291         };
10292
10293         // Channel should exist without any timer ticks.
10294         check_inbound_channel_existence(true);
10295
10296         // Channel should exist with 1 timer tick less than required.
10297         for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
10298                 nodes[1].node.timer_tick_occurred();
10299                 check_inbound_channel_existence(true);
10300         }
10301
10302         // Remove channel after reaching the required ticks.
10303         nodes[1].node.timer_tick_occurred();
10304         check_inbound_channel_existence(false);
10305
10306         let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
10307         assert_eq!(msg_events.len(), 1);
10308         match msg_events[0] {
10309                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
10310                         assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
10311                 },
10312                 _ => panic!("Unexpected event"),
10313         }
10314         check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
10315 }
10316
10317 fn do_test_multi_post_event_actions(do_reload: bool) {
10318         // Tests handling multiple post-Event actions at once.
10319         // There is specific code in ChannelManager to handle channels where multiple post-Event
10320         // `ChannelMonitorUpdates` are pending at once. This test exercises that code.
10321         //
10322         // Specifically, we test calling `get_and_clear_pending_events` while there are two
10323         // PaymentSents from different channels and one channel has two pending `ChannelMonitorUpdate`s
10324         // - one from an RAA and one from an inbound commitment_signed.
10325         let chanmon_cfgs = create_chanmon_cfgs(3);
10326         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10327         let (persister, chain_monitor);
10328         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
10329         let nodes_0_deserialized;
10330         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10331
10332         let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
10333         let chan_id_2 = create_announced_chan_between_nodes(&nodes, 0, 2).2;
10334
10335         send_payment(&nodes[0], &[&nodes[1]], 1_000_000);
10336         send_payment(&nodes[0], &[&nodes[2]], 1_000_000);
10337
10338         let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
10339         let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000);
10340
10341         nodes[1].node.claim_funds(our_payment_preimage);
10342         check_added_monitors!(nodes[1], 1);
10343         expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);
10344
10345         nodes[2].node.claim_funds(payment_preimage_2);
10346         check_added_monitors!(nodes[2], 1);
10347         expect_payment_claimed!(nodes[2], payment_hash_2, 1_000_000);
10348
10349         for dest in &[1, 2] {
10350                 let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[*dest], nodes[0].node.get_our_node_id());
10351                 nodes[0].node.handle_update_fulfill_htlc(&nodes[*dest].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
10352                 commitment_signed_dance!(nodes[0], nodes[*dest], htlc_fulfill_updates.commitment_signed, false);
10353                 check_added_monitors(&nodes[0], 0);
10354         }
10355
10356         let (route, payment_hash_3, _, payment_secret_3) =
10357                 get_route_and_payment_hash!(nodes[1], nodes[0], 100_000);
10358         let payment_id = PaymentId(payment_hash_3.0);
10359         nodes[1].node.send_payment_with_route(&route, payment_hash_3,
10360                 RecipientOnionFields::secret_only(payment_secret_3), payment_id).unwrap();
10361         check_added_monitors(&nodes[1], 1);
10362
10363         let send_event = SendEvent::from_node(&nodes[1]);
10364         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
10365         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event.commitment_msg);
10366         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
10367
10368         if do_reload {
10369                 let nodes_0_serialized = nodes[0].node.encode();
10370                 let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
10371                 let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_2).encode();
10372                 reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, chain_monitor, nodes_0_deserialized);
10373
10374                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10375                 nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10376
10377                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
10378                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[2]));
10379         }
10380
10381         let events = nodes[0].node.get_and_clear_pending_events();
10382         assert_eq!(events.len(), 4);
10383         if let Event::PaymentSent { payment_preimage, .. } = events[0] {
10384                 assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
10385         } else { panic!(); }
10386         if let Event::PaymentSent { payment_preimage, .. } = events[1] {
10387                 assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
10388         } else { panic!(); }
10389         if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
10390         if let Event::PaymentPathSuccessful { .. } = events[3] {} else { panic!(); }
10391
10392         // After the events are processed, the ChannelMonitorUpdates will be released and, upon their
10393         // completion, we'll respond to nodes[1] with an RAA + CS.
10394         get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
10395         check_added_monitors(&nodes[0], 3);
10396 }
10397
10398 #[test]
10399 fn test_multi_post_event_actions() {
10400         do_test_multi_post_event_actions(true);
10401         do_test_multi_post_event_actions(false);
10402 }
10403
10404 #[test]
10405 fn test_batch_channel_open() {
10406         let chanmon_cfgs = create_chanmon_cfgs(3);
10407         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10408         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
10409         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10410
10411         // Initiate channel opening and create the batch channel funding transaction.
10412         let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
10413                 (&nodes[1], 100_000, 0, 42, None),
10414                 (&nodes[2], 200_000, 0, 43, None),
10415         ]);
10416
10417         // Go through the funding_created and funding_signed flow with node 1.
10418         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
10419         check_added_monitors(&nodes[1], 1);
10420         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
10421
10422         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
10423         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
10424         check_added_monitors(&nodes[0], 1);
10425
10426         // The transaction should not have been broadcast before all channels are ready.
10427         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
10428
10429         // Go through the funding_created and funding_signed flow with node 2.
10430         nodes[2].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[1]);
10431         check_added_monitors(&nodes[2], 1);
10432         expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id());
10433
10434         let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
10435         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
10436         nodes[0].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &funding_signed_msg);
10437         check_added_monitors(&nodes[0], 1);
10438
10439         // The transaction should not have been broadcast before the persistence of all
10440         // monitors has completed.
10441         assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
10442         assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
10443
10444         // Complete the persistence of the monitor.
10445         nodes[0].chain_monitor.complete_sole_pending_chan_update(
10446                 &OutPoint { txid: tx.txid(), index: 1 }.to_channel_id()
10447         );
10448         let events = nodes[0].node.get_and_clear_pending_events();
10449
10450         // The transaction should only have been broadcast now.
10451         let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
10452         assert_eq!(broadcasted_txs.len(), 1);
10453         assert_eq!(broadcasted_txs[0], tx);
10454
10455         assert_eq!(events.len(), 2);
10456         assert!(events.iter().any(|e| matches!(
10457                 *e,
10458                 crate::events::Event::ChannelPending {
10459                         ref counterparty_node_id,
10460                         ..
10461                 } if counterparty_node_id == &nodes[1].node.get_our_node_id(),
10462         )));
10463         assert!(events.iter().any(|e| matches!(
10464                 *e,
10465                 crate::events::Event::ChannelPending {
10466                         ref counterparty_node_id,
10467                         ..
10468                 } if counterparty_node_id == &nodes[2].node.get_our_node_id(),
10469         )));
10470 }
10471
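// A toy model (ours, plain Rust, not LDK code) of the batch-funding invariant exercised
// above: the shared funding transaction may only be broadcast once every channel in the
// batch has exchanged `funding_signed` and completed its initial monitor persistence.
#[test]
fn batch_broadcast_invariant_sketch() {
        struct BatchPart { funding_signed: bool, monitor_persisted: bool }
        let can_broadcast = |parts: &[BatchPart]| parts.iter()
                .all(|p| p.funding_signed && p.monitor_persisted);
        let mut parts = vec![
                BatchPart { funding_signed: true, monitor_persisted: true },
                BatchPart { funding_signed: true, monitor_persisted: false },
        ];
        // One monitor persistence still in flight: hold the funding tx.
        assert!(!can_broadcast(&parts));
        parts[1].monitor_persisted = true;
        // Everything ready: the tx may finally be broadcast, as asserted above.
        assert!(can_broadcast(&parts));
}
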
10472 #[test]
10473 fn test_disconnect_in_funding_batch() {
10474         let chanmon_cfgs = create_chanmon_cfgs(3);
10475         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10476         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
10477         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10478
10479         // Initiate channel opening and create the batch channel funding transaction.
10480         let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
10481                 (&nodes[1], 100_000, 0, 42, None),
10482                 (&nodes[2], 200_000, 0, 43, None),
10483         ]);
10484
10485         // Go through the funding_created and funding_signed flow with node 1.
10486         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
10487         check_added_monitors(&nodes[1], 1);
10488         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
10489
10490         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
10491         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
10492         check_added_monitors(&nodes[0], 1);
10493
10494         // The transaction should not have been broadcast before all channels are ready.
10495         assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
10496
10497         // The remaining peer in the batch disconnects.
10498         nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
10499
10500         // The channels in the batch will close immediately.
10501         let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id();
10502         let channel_id_2 = OutPoint { txid: tx.txid(), index: 1 }.to_channel_id();
10503         check_closed_events(&nodes[0], &[
10504                 ExpectedCloseEvent {
10505                         channel_id: Some(channel_id_1),
10506                         discard_funding: true,
10507                         ..Default::default()
10508                 },
10509                 ExpectedCloseEvent {
10510                         channel_id: Some(channel_id_2),
10511                         discard_funding: true,
10512                         ..Default::default()
10513                 },
10514         ]);
10515
10516         // The monitor should now be marked closed.
10517         check_added_monitors(&nodes[0], 1);
10518         {
10519                 let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
10520                 let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
10521                 assert_eq!(monitor_updates_1.len(), 1);
10522                 assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
10523         }
10524
10525         // The funding transaction should not have been broadcast, and therefore we don't need
10526         // to broadcast a force-close transaction for the closed monitor.
10527         assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
10528
10529         // Ensure the channels don't exist anymore.
10530         assert!(nodes[0].node.list_channels().is_empty());
10531 }
10532
10533 #[test]
10534 fn test_batch_funding_close_after_funding_signed() {
10535         let chanmon_cfgs = create_chanmon_cfgs(3);
10536         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10537         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
10538         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10539
10540         // Initiate channel opening and create the batch channel funding transaction.
10541         let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
10542                 (&nodes[1], 100_000, 0, 42, None),
10543                 (&nodes[2], 200_000, 0, 43, None),
10544         ]);
10545
10546         // Go through the funding_created and funding_signed flow with node 1.
10547         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
10548         check_added_monitors(&nodes[1], 1);
10549         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
10550
10551         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
10552         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
10553         check_added_monitors(&nodes[0], 1);
10554
10555         // Go through the funding_created and funding_signed flow with node 2.
10556         nodes[2].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[1]);
10557         check_added_monitors(&nodes[2], 1);
10558         expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id());
10559
10560         let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
10561         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
10562         nodes[0].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &funding_signed_msg);
10563         check_added_monitors(&nodes[0], 1);
10564
10565         // The transaction should not have been broadcast before all channels are ready.
10566         assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
10567
10568         // Force-close the channel for which we've completed the initial monitor.
10569         let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id();
10570         let channel_id_2 = OutPoint { txid: tx.txid(), index: 1 }.to_channel_id();
10571         nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
10572         check_added_monitors(&nodes[0], 2);
10573         {
10574                 let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
10575                 let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
10576                 assert_eq!(monitor_updates_1.len(), 1);
10577                 assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
10578                 let monitor_updates_2 = monitor_updates.get(&channel_id_2).unwrap();
10579                 assert_eq!(monitor_updates_2.len(), 1);
10580                 assert_eq!(monitor_updates_2[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
10581         }
	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
	match msg_events[0] {
		MessageSendEvent::HandleError { .. } => (),
		_ => panic!("Unexpected message."),
	}

	// We broadcast the commitment transaction as part of the force-close.
	{
		let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
		assert_eq!(broadcasted_txs.len(), 1);
		assert!(broadcasted_txs[0].txid() != tx.txid());
		assert_eq!(broadcasted_txs[0].input.len(), 1);
		assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
	}
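	// The single broadcast transaction is node 0's latest commitment transaction:
	// it is not the funding transaction itself, but spends the force-closed
	// channel's output of the (never-broadcast) funding transaction.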

	// All channels in the batch should close immediately.
	check_closed_events(&nodes[0], &[
		ExpectedCloseEvent {
			channel_id: Some(channel_id_1),
			discard_funding: true,
			..Default::default()
		},
		ExpectedCloseEvent {
			channel_id: Some(channel_id_2),
			discard_funding: true,
			..Default::default()
		},
	]);

	// Ensure the channels don't exist anymore.
	assert!(nodes[0].node.list_channels().is_empty());
}

fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitment: bool) {
	// Tests that a node will forget the channel (when it only requires 1 confirmation) if the
	// funding and commitment transactions confirm in the same block.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let mut min_depth_1_block_cfg = test_default_channel_config();
	min_depth_1_block_cfg.channel_handshake_config.minimum_depth = 1;
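	// With minimum_depth set to 1, a single confirmation is enough for the channel
	// to be considered open, which is what allows the funding and commitment
	// transactions to take effect in the same block below.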
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(min_depth_1_block_cfg), Some(min_depth_1_block_cfg)]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
	let chan_id = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 }.to_channel_id();

	assert_eq!(nodes[0].node.list_channels().len(), 1);
	assert_eq!(nodes[1].node.list_channels().len(), 1);

	let (closing_node, other_node) = if confirm_remote_commitment {
		(&nodes[1], &nodes[0])
	} else {
		(&nodes[0], &nodes[1])
	};
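	// confirm_remote_commitment chooses which side broadcasts, and thus whose
	// commitment transaction confirms: the fundee's (node 1) when set, the
	// funder's (node 0) otherwise.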

	closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node.node.get_our_node_id()).unwrap();
	let mut msg_events = closing_node.node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 1);
	match msg_events.pop().unwrap() {
		MessageSendEvent::HandleError { action: msgs::ErrorAction::DisconnectPeer { .. }, .. } => {},
		_ => panic!("Unexpected event"),
	}
	check_added_monitors(closing_node, 1);
	check_closed_event(closing_node, 1, ClosureReason::HolderForceClosed, false, &[other_node.node.get_our_node_id()], 1_000_000);
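	// The node doing the force-close records HolderForceClosed; its peer, which
	// only learns of the closure from the chain, will instead record
	// CommitmentTxConfirmed once the commitment transaction is mined (checked
	// below).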

	let commitment_tx = {
		let mut txn = closing_node.tx_broadcaster.txn_broadcast();
		assert_eq!(txn.len(), 1);
		let commitment_tx = txn.pop().unwrap();
		check_spends!(commitment_tx, funding_tx);
		commitment_tx
	};

	mine_transactions(&nodes[0], &[&funding_tx, &commitment_tx]);
	mine_transactions(&nodes[1], &[&funding_tx, &commitment_tx]);
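	// Both the funding transaction and the commitment transaction spending it are
	// mined in a single block on each node, so each node sees the channel reach its
	// 1-confirmation minimum depth and sees it closed by the confirmed commitment
	// while processing the same block.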

	check_closed_broadcast(other_node, 1, true);
	check_added_monitors(other_node, 1);
	check_closed_event(other_node, 1, ClosureReason::CommitmentTxConfirmed, false, &[closing_node.node.get_our_node_id()], 1_000_000);

	assert!(nodes[0].node.list_channels().is_empty());
	assert!(nodes[1].node.list_channels().is_empty());
}

#[test]
fn test_funding_and_commitment_tx_confirm_same_block() {
	do_test_funding_and_commitment_tx_confirm_same_block(false);
	do_test_funding_and_commitment_tx_confirm_same_block(true);
}