// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Tests that stand up a network of ChannelManagers, create channels, send payments/messages
//! between them, and often check that the resulting ChannelMonitors are able to claim outputs
//! on-chain.

use crate::chain;
use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
use crate::chain::chaininterface::LowerBoundedFeeEstimator;
use crate::chain::channelmonitor;
use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use crate::chain::transaction::OutPoint;
use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
use crate::ln::types::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
use crate::ln::{chan_utils, onion_utils};
use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
use crate::routing::gossip::{NetworkGraph, NetworkUpdate};
use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route, RouteParameters};
use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use crate::util::test_channel_signer::TestChannelSigner;
use crate::util::test_utils::{self, WatchtowerPersister};
use crate::util::errors::APIError;
use crate::util::ser::{Writeable, ReadableArgs};
use crate::util::string::UntrustedString;
use crate::util::config::{UserConfig, MaxDustHTLCExposure};

use bitcoin::hash_types::BlockHash;
use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::blockdata::script::{Builder, ScriptBuf};
use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::network::constants::Network;
use bitcoin::{Sequence, Transaction, TxIn, TxOut, Witness};
use bitcoin::OutPoint as BitcoinOutPoint;

use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::{PublicKey, SecretKey};

use crate::io;
use crate::prelude::*;
use alloc::collections::BTreeSet;
use core::iter::repeat;
use bitcoin::hashes::Hash;
use crate::sync::{Arc, Mutex, RwLock};

use crate::ln::functional_test_utils::*;
use crate::ln::chan_utils::CommitmentTransaction;

use super::channel::UNFUNDED_CHANNEL_AGE_LIMIT_TICKS;

#[test]
fn test_insane_channel_opens() {
	// Stand up a network of 2 nodes
	use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS;
	let mut cfg = UserConfig::default();
	cfg.channel_handshake_limits.max_funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1;
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(cfg)]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Instantiate channel parameters where we push the maximum msats given our
	// funding satoshis
	let channel_value_sat = 31337; // same as funding satoshis
	let channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg);
	let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
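	// A back-of-the-envelope sketch of the arithmetic above (assuming, as in the default
	// config, a proportional reserve with a fixed satoshi floor):
	//   reserve   = max(channel_value_sat * reserve_ppm / 1_000_000, floor_sat)
	//   push_msat = (channel_value_sat - reserve) * 1000
	// i.e. the funder pushes everything except the reserve it must itself maintain.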

	// Have node0 initiate a channel to node1 with aforementioned parameters
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_sat, push_msat, 42, None, None).unwrap();

	// Extract the channel open message from node0 to node1
	let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	// Test helper that asserts we get the correct error string given a mutator
	// that supposedly makes the channel open message insane
	let insane_open_helper = |expected_error_str: &str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &message_mutator(open_channel_message.clone()));
		let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(msg_events.len(), 1);
		let expected_regex = regex::Regex::new(expected_error_str).unwrap();
		if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
			match action {
				&ErrorAction::SendErrorMessage { .. } => {
					nodes[1].logger.assert_log_regex("lightning::ln::channelmanager", expected_regex, 1);
				},
				_ => panic!("unexpected event!"),
			}
		} else { assert!(false); }
	};

	use crate::ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;

	// Test all mutations that would make the channel open message insane
	insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
	insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });

	insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.common_fields.funding_satoshis + 1; msg });

	insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });

	insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.common_fields.dust_limit_satoshis = msg.common_fields.funding_satoshis + 1; msg });

	insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.common_fields.htlc_minimum_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });

	insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.common_fields.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });

	insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.common_fields.max_accepted_htlcs = 0; msg });

	insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.common_fields.max_accepted_htlcs = 484; msg });
}

#[test]
fn test_funding_exceeds_no_wumbo_limit() {
	// Test that if a peer does not support wumbo channels, we'll refuse to open a wumbo channel to
	// them.
	use crate::ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO;
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	*node_cfgs[1].override_init_features.borrow_mut() = Some(channelmanager::provided_init_features(&test_default_channel_config()).clear_wumbo());
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	match nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None, None) {
		Err(APIError::APIMisuseError { err }) => {
			assert_eq!(format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1), err);
		},
		_ => panic!()
	}
}

fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
	// A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure,
	// but only for them. Because some LSPs do it with some level of trust of the clients (for a
	// substantial UX improvement), we explicitly allow it. Because it's unlikely to happen often
	// in normal testing, we test it explicitly here.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let default_config = UserConfig::default();

	// Have node0 initiate a channel to node1 with aforementioned parameters
	let mut push_amt = 100_000_000;
	let feerate_per_kw = 253;
	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
	push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(&channel_type_features) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
	push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
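	// A sketch of the arithmetic above: starting from the full 100k-sat value (in msat), we
	// subtract (a) the commitment tx fee the funder must always be able to pay, assuming up
	// to four HTLCs in flight (rounded down to whole sats, hence the `/ 1000 * 1000`), and
	// (b) one default-selected channel reserve, leaving push_amt at the edge of what the
	// funder can afford.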

	let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None, None).unwrap();
	let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	if !send_from_initiator {
		open_channel_message.channel_reserve_satoshis = 0;
		open_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000;
	}
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);

	// Extract the channel accept message from node1 to node0
	let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
	if send_from_initiator {
		accept_channel_message.channel_reserve_satoshis = 0;
		accept_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000;
	}
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
	{
		let sender_node = if send_from_initiator { &nodes[1] } else { &nodes[0] };
		let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
		let mut sender_node_per_peer_lock;
		let mut sender_node_peer_state_lock;

		let channel_phase = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
		match channel_phase {
			ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
				let chan_context = channel_phase.context_mut();
				chan_context.holder_selected_channel_reserve_satoshis = 0;
				chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000;
			},
			_ => assert!(false),
		}
	}

	let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id);
	let funding_msgs = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx);
	create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_msgs.0);

	// nodes[0] should now be able to send the full balance to nodes[1], violating nodes[1]'s
	// security model if it ever tries to send funds back to nodes[0] (but that's not our problem).
	if send_from_initiator {
		send_payment(&nodes[0], &[&nodes[1]], 100_000_000
			// Note that for outbound channels we have to consider the commitment tx fee and the
			// "fee spike buffer", which is currently a multiple of the total commitment tx fee as
			// well as an additional HTLC.
			- FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, &channel_type_features));
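		// A sketch of the subtraction above: commit_tx_fee_msat(feerate, 2, ..) is the fee on
		// a commitment carrying the payment HTLC plus the one extra HTLC slot budgeted for
		// fee spikes, and the whole fee is scaled by the buffer's increase multiple.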
	} else {
		send_payment(&nodes[1], &[&nodes[0]], push_amt);
	}
}

#[test]
fn test_counterparty_no_reserve() {
	do_test_counterparty_no_reserve(true);
	do_test_counterparty_no_reserve(false);
}

#[test]
fn test_async_inbound_update_fee() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &[&nodes[1]], 8000000);

	// A                                        B
	// update_fee                            ->
	// send (1) commitment_signed            -.
	//                                       <- update_add_htlc/commitment_signed
	// send (2) RAA (awaiting remote revoke) -.
	// (1) commitment_signed is delivered    ->
	//                                       .- send (3) RAA (awaiting remote revoke)
	// (2) RAA is delivered                  ->
	//                                       .- send (4) commitment_signed
	//                                       <- (3) RAA is delivered
	// send (5) commitment_signed            -.
	//                                       <- (4) commitment_signed is delivered
	// send (6) RAA                          -.
	// (5) commitment_signed is delivered    ->
	//                                       <- RAA
	// (6) RAA is delivered                  ->

	// First nodes[0] generates an update_fee
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] { // (1)
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	// ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000);
	nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[1], 1);

	let payment_event = {
		let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events_1.len(), 1);
		SendEvent::from_event(events_1.remove(0))
	};
	assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
	assert_eq!(payment_event.msgs.len(), 1);

	// ...now when the messages get delivered everyone should be happy
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
	let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	// deliver(1), generate (3):
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[1], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack); // deliver (2)
	let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(bs_update.update_add_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fail_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4)
	assert!(bs_update.update_fee.is_none()); // (4)
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); // deliver (3)
	let as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	assert!(as_update.update_add_htlcs.is_empty()); // (5)
	assert!(as_update.update_fulfill_htlcs.is_empty()); // (5)
	assert!(as_update.update_fail_htlcs.is_empty()); // (5)
	assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5)
	assert!(as_update.update_fee.is_none()); // (5)
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed); // deliver (4)
	let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// only (6) so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.commitment_signed); // deliver (5)
	let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
	check_added_monitors!(nodes[0], 1);

	let events_2 = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events_2.len(), 1);
	match events_2[0] {
		Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment
		_ => panic!("Unexpected event"),
	}

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke); // deliver (6)
	check_added_monitors!(nodes[1], 1);
}

#[test]
fn test_update_fee_unordered_raa() {
	// Just the intro to the previous test followed by an out-of-order RAA (which caused a
	// crash in an earlier version of the update_fee patch)
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// balancing
	send_payment(&nodes[0], &[&nodes[1]], 8000000);

	// First nodes[0] generates an update_fee
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let update_msg = match events_0[0] { // (1)
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
			update_fee.as_ref()
		},
		_ => panic!("Unexpected event"),
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	// ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000);
	nodes[1].node.send_payment_with_route(&route, our_payment_hash,
		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
	check_added_monitors!(nodes[1], 1);

	let payment_event = {
		let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events_1.len(), 1);
		SendEvent::from_event(events_1.remove(0))
	};
	assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
	assert_eq!(payment_event.msgs.len(), 1);

	// ...now when the messages get delivered everyone should be happy
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
	let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg); // deliver (2)
	check_added_monitors!(nodes[1], 1);

	// We can't continue, sadly, because our (1) now has a bogus signature
}

#[test]
fn test_multi_flight_update_fee() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	// A                                        B
	// update_fee/commitment_signed          ->
	//                                       .- send (1) RAA and (2) commitment_signed
	// update_fee (never committed)          ->
	// (3) update_fee                        ->
	// We have to manually generate the above update_fee; it is allowed by the protocol, but we
	// don't track which updates correspond to which revoke_and_ack responses so we're in
	// AwaitingRAA mode and will not generate the update_fee yet.
	//                                       <- (1) RAA delivered
	// (3) is generated and send (4) CS      -.
	// Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't
	// know the per_commitment_point to use for it.
	//                                       <- (2) commitment_signed delivered
	// revoke_and_ack                        ->
	//                                          B should send no response here
	// (4) commitment_signed delivered       ->
	//                                       <- RAA/commitment_signed delivered
	// revoke_and_ack                        ->

	// First nodes[0] generates an update_fee
	let initial_feerate;
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		initial_feerate = *feerate_lock;
		*feerate_lock = initial_feerate + 20;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1)
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
			(update_fee.as_ref().unwrap(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};

	// Deliver first update_fee/commitment_signed pair, generating (1) and (2):
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1);
	let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	// nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment
	// transaction:
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock = initial_feerate + 40;
	}
	nodes[0].node.timer_tick_occurred();
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// Create the (3) update_fee message that nodes[0] will generate before it does...
	let mut update_msg_2 = msgs::UpdateFee {
		channel_id: update_msg_1.channel_id.clone(),
		feerate_per_kw: (initial_feerate + 30) as u32,
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);

	update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32;
	// Deliver (3)
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);

	// Deliver (1), generating (3) and (4)
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg);
	let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	check_added_monitors!(nodes[0], 1);
	assert!(as_second_update.update_add_htlcs.is_empty());
	assert!(as_second_update.update_fulfill_htlcs.is_empty());
	assert!(as_second_update.update_fail_htlcs.is_empty());
	assert!(as_second_update.update_fail_malformed_htlcs.is_empty());
	// Check that the update_fee newly generated matches what we delivered:
	assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id);
	assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw);

	// Deliver (2) commitment_signed
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
	let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	check_added_monitors!(nodes[0], 1);
	// No commitment_signed so get_event_msg's assert(len == 1) passes

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	// Deliver (4)
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
	let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment);
	let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);
}

fn do_test_sanity_on_in_flight_opens(steps: u8) {
	// Previously, we had issues deserializing channels when we hadn't connected the first block
	// after creation. To catch that and similar issues, we lean on the Node::drop impl to test
	// serialization round-trips and simply do steps towards opening a channel and then drop the
	// Node objects.
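	// The low nibble of `steps` selects how far through the open/funding flow we get before
	// dropping the nodes (and thus round-tripping serialization); the high bit additionally
	// connects a dummy block on both nodes first.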

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	if steps & 0b1000_0000 != 0 {
		let block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
		connect_block(&nodes[0], &block);
		connect_block(&nodes[1], &block);
	}

	if steps & 0x0f == 0 { return; }
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
	let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	if steps & 0x0f == 1 { return; }
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
	let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());

	if steps & 0x0f == 2 { return; }
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);

	let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);

	if steps & 0x0f == 3 { return; }
	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
	check_added_monitors!(nodes[0], 0);
	let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());

	if steps & 0x0f == 4 { return; }
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
	{
		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 1);
		assert_eq!(added_monitors[0].0, funding_output);
		added_monitors.clear();
	}
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());

	if steps & 0x0f == 5 { return; }
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
	{
		let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 1);
		assert_eq!(added_monitors[0].0, funding_output);
		added_monitors.clear();
	}

	expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
	let events_4 = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events_4.len(), 0);

	if steps & 0x0f == 6 { return; }
	create_chan_between_nodes_with_value_confirm_first(&nodes[0], &nodes[1], &tx, 2);

	if steps & 0x0f == 7 { return; }
	confirm_transaction_at(&nodes[0], &tx, 2);
	connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
	create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
	expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
}

#[test]
fn test_sanity_on_in_flight_opens() {
	do_test_sanity_on_in_flight_opens(0);
	do_test_sanity_on_in_flight_opens(0 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(1);
	do_test_sanity_on_in_flight_opens(1 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(2);
	do_test_sanity_on_in_flight_opens(2 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(3);
	do_test_sanity_on_in_flight_opens(3 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(4);
	do_test_sanity_on_in_flight_opens(4 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(5);
	do_test_sanity_on_in_flight_opens(5 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(6);
	do_test_sanity_on_in_flight_opens(6 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(7);
	do_test_sanity_on_in_flight_opens(7 | 0b1000_0000);
	do_test_sanity_on_in_flight_opens(8);
	do_test_sanity_on_in_flight_opens(8 | 0b1000_0000);
}

#[test]
fn test_update_fee_vanilla() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 25;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);

	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
	let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
	let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);
}

#[test]
fn test_update_fee_that_funder_cannot_afford() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_value = 5000;
	let push_sats = 700;
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000);
	let channel_id = chan.2;
	let secp_ctx = Secp256k1::new();
	let default_config = UserConfig::default();
	let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);

	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();

	// Calculate the maximum feerate that A can afford. Note that, when sending an update_fee,
	// we budget for CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLCs' worth of fee before actually
	// running out of local balance, so we calculate two different feerates here - the
	// expected local limit as well as the expected remote limit.
	let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(&channel_type_features) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32;
	let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(&channel_type_features)) as u32;
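	// A sketch of the two limits above: the balance available for fees is
	// channel_value - reserve - push_sats, and a commitment at feerate f costs
	// weight * f / 1000 sats. Locally we budget for CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLC
	// slots on top of the base weight; the remote check uses the base commitment weight
	// alone, hence the higher `non_buffer_feerate`.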
	{
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock = feerate;
	}
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(nodes[0], 1);
	let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg.update_fee.unwrap());

	commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false);

	// Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate set above.
	{
		let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone();

		// We made sure neither party's funds are below the dust limit and there are no HTLCs here
		assert_eq!(commitment_tx.output.len(), 2);
		let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000;
		let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value);
		actual_fee = channel_value - actual_fee;
		assert_eq!(total_fee, actual_fee);
	}

	{
		// Increment the feerate by a small constant, accounting for rounding errors
		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock += 4;
	}
	nodes[0].node.timer_tick_occurred();
	nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
	check_added_monitors!(nodes[0], 0);

	const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;
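	// Note: 281474976710654 is 2^48 - 2; per BOLT 3, commitment numbers are 48-bit values
	// counted down from 2^48 - 1 as successive commitment transactions are built.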

	// Get the TestChannelSigner for each channel, which will be used to (1) get the keys
	// needed to sign the new commitment tx and (2) sign the new commitment tx.
	let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let chan_signer = local_chan.get_signer();
		let pubkeys = chan_signer.as_ref().pubkeys();
		(pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
		 pubkeys.funding_pubkey)
	};
	let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
		let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let chan_signer = remote_chan.get_signer();
		let pubkeys = chan_signer.as_ref().pubkeys();
		(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
		 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
		 pubkeys.funding_pubkey)
	};

	// Assemble the set of keys we can use for signatures for our commitment_signed message.
	let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
		&remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);

	let res = {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
		).flatten().unwrap();
		let local_chan_signer = local_chan.get_signer();
		let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![];
		let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
			INITIAL_COMMITMENT_NUMBER - 1,
			push_sats,
			channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000,
			local_funding, remote_funding,
			commit_tx_keys.clone(),
			non_buffer_feerate + 4,
			&mut htlcs,
			&local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
		);
		local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), Vec::new(), &secp_ctx).unwrap()
	};
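	// With the raw signature in hand, we can hand-assemble a commitment_signed (and matching
	// update_fee, below) at a feerate the funder could never actually have proposed, and
	// check that the fundee rejects it rather than accepting a fee its peer cannot afford.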

	let commit_signed_msg = msgs::CommitmentSigned {
		channel_id: chan.2,
		signature: res.0,
		htlc_signatures: res.1,
		#[cfg(taproot)]
		partial_signature_with_nonce: None,
	};

	let update_fee = msgs::UpdateFee {
		channel_id: chan.2,
		feerate_per_kw: non_buffer_feerate + 4,
	};

	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_fee);

	// While producing the commitment_signed response after handling the received update_fee, the
	// fundee checks that the funder (who sent the update_fee) can afford the new fee
	// (funder_balance >= fee + channel_reserve); that check should produce an error here.
767         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
768         nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee", 3);
769         check_added_monitors!(nodes[1], 1);
770         check_closed_broadcast!(nodes[1], true);
771         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") },
772                 [nodes[0].node.get_our_node_id()], channel_value);
773 }
774
775 #[test]
776 fn test_update_fee_with_fundee_update_add_htlc() {
777         let chanmon_cfgs = create_chanmon_cfgs(2);
778         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
779         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
780         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
781         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
782
783         // balancing
784         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
785
786         {
787                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
788                 *feerate_lock += 20;
789         }
790         nodes[0].node.timer_tick_occurred();
791         check_added_monitors!(nodes[0], 1);
792
793         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
794         assert_eq!(events_0.len(), 1);
795         let (update_msg, commitment_signed) = match events_0[0] {
796                         MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
797                         (update_fee.as_ref(), commitment_signed)
798                 },
799                 _ => panic!("Unexpected event"),
800         };
801         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
802         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
803         let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
804         check_added_monitors!(nodes[1], 1);
805
806         let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 800000);
807
808         // nothing happens since node[1] is in AwaitingRemoteRevoke
809         nodes[1].node.send_payment_with_route(&route, our_payment_hash,
810                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
811         {
812                 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
813                 assert_eq!(added_monitors.len(), 0);
814                 added_monitors.clear();
815         }
816         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
817         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
818         // node[1] has nothing to do
819
820         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
821         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
822         check_added_monitors!(nodes[0], 1);
823
824         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
825         let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
826         // No commitment_signed so get_event_msg's assert(len == 1) passes
827         check_added_monitors!(nodes[0], 1);
828         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
829         check_added_monitors!(nodes[1], 1);
830         // AwaitingRemoteRevoke ends here
831
832         let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
833         assert_eq!(commitment_update.update_add_htlcs.len(), 1);
834         assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0);
835         assert_eq!(commitment_update.update_fail_htlcs.len(), 0);
836         assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0);
837         assert_eq!(commitment_update.update_fee.is_none(), true);
838
839         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]);
840         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
841         check_added_monitors!(nodes[0], 1);
842         let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
843
844         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke);
845         check_added_monitors!(nodes[1], 1);
846         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
847
848         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
849         check_added_monitors!(nodes[1], 1);
850         let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
851         // No commitment_signed so get_event_msg's assert(len == 1) passes
852
853         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke);
854         check_added_monitors!(nodes[0], 1);
855         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
856
857         expect_pending_htlcs_forwardable!(nodes[0]);
858
859         let events = nodes[0].node.get_and_clear_pending_events();
860         assert_eq!(events.len(), 1);
861         match events[0] {
862                 Event::PaymentClaimable { .. } => { },
863                 _ => panic!("Unexpected event"),
864         };
865
866         claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage);
867
868         send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
869         send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
870         close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
871         check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
872         check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
873 }
874
875 #[test]
876 fn test_update_fee() {
877         let chanmon_cfgs = create_chanmon_cfgs(2);
878         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
879         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
880         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
881         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
882         let channel_id = chan.2;
883
884         // A                                        B
885         // (1) update_fee/commitment_signed      ->
886         //                                       <- (2) revoke_and_ack
887         //                                       .- send (3) commitment_signed
888         // (4) update_fee/commitment_signed      ->
889         //                                       .- send (5) revoke_and_ack (no CS as we're awaiting a revoke)
890         //                                       <- (3) commitment_signed delivered
891         // send (6) revoke_and_ack               -.
892         //                                       <- (5) deliver revoke_and_ack
893         // (6) deliver revoke_and_ack            ->
894         //                                       .- send (7) commitment_signed in response to (4)
895         //                                       <- (7) deliver commitment_signed
896         // revoke_and_ack                        ->
897
898         // Create and deliver (1)...
899         let feerate;
900         {
901                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
902                 feerate = *feerate_lock;
903                 *feerate_lock = feerate + 20;
904         }
905         nodes[0].node.timer_tick_occurred();
906         check_added_monitors!(nodes[0], 1);
907
908         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
909         assert_eq!(events_0.len(), 1);
910         let (update_msg, commitment_signed) = match events_0[0] {
911                         MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
912                         (update_fee.as_ref(), commitment_signed)
913                 },
914                 _ => panic!("Unexpected event"),
915         };
916         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
917
918         // Generate (2) and (3):
919         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
920         let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
921         check_added_monitors!(nodes[1], 1);
922
923         // Deliver (2):
924         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
925         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
926         check_added_monitors!(nodes[0], 1);
927
928         // Create and deliver (4)...
929         {
930                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
931                 *feerate_lock = feerate + 30;
932         }
933         nodes[0].node.timer_tick_occurred();
934         check_added_monitors!(nodes[0], 1);
935         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
936         assert_eq!(events_0.len(), 1);
937         let (update_msg, commitment_signed) = match events_0[0] {
938                 MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
939                         (update_fee.as_ref(), commitment_signed)
940                 },
941                 _ => panic!("Unexpected event"),
942         };
943
944         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
945         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
946         check_added_monitors!(nodes[1], 1);
947         // ... creating (5)
948         let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
949         // No commitment_signed so get_event_msg's assert(len == 1) passes
950
951         // Handle (3), creating (6):
952         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0);
953         check_added_monitors!(nodes[0], 1);
954         let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
955         // No commitment_signed so get_event_msg's assert(len == 1) passes
956
957         // Deliver (5):
958         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
959         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
960         check_added_monitors!(nodes[0], 1);
961
962         // Deliver (6), creating (7):
963         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0);
964         let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
965         assert!(commitment_update.update_add_htlcs.is_empty());
966         assert!(commitment_update.update_fulfill_htlcs.is_empty());
967         assert!(commitment_update.update_fail_htlcs.is_empty());
968         assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
969         assert!(commitment_update.update_fee.is_none());
970         check_added_monitors!(nodes[1], 1);
971
972         // Deliver (7)
973         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
974         check_added_monitors!(nodes[0], 1);
975         let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
976         // No commitment_signed so get_event_msg's assert(len == 1) passes
977
978         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
979         check_added_monitors!(nodes[1], 1);
980         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
981
982         assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30);
983         assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30);
984         close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
985         check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
986         check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
987 }
988
989 #[test]
990 fn fake_network_test() {
991         // Simple test which builds a network of ChannelManagers, connects them to each other, and
992         // tests that payments get routed and transactions broadcast in semi-reasonable ways.
993         let chanmon_cfgs = create_chanmon_cfgs(4);
994         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
995         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
996         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
997
998         // Create some initial channels
999         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1000         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
1001         let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
1002
1003         // Rebalance the network a bit by relaying one payment through all the channels...
1004         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1005         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1006         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1007         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1008
1009         // Send some more payments
1010         send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000);
1011         send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000);
1012         send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000);
1013
1014         // Test failure packets
1015         let payment_hash_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 1000000).1;
1016         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1);
1017
1018         // Add a new channel that skips 3
1019         let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3);
1020
1021         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
1022         send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
1023         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1024         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1025         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1026         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1027         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1028
1029         // Do some rebalance loop payments, simultaneously
1030         let mut hops = Vec::with_capacity(3);
1031         hops.push(RouteHop {
1032                 pubkey: nodes[2].node.get_our_node_id(),
1033                 node_features: NodeFeatures::empty(),
1034                 short_channel_id: chan_2.0.contents.short_channel_id,
1035                 channel_features: ChannelFeatures::empty(),
1036                 fee_msat: 0,
1037                 cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32,
1038                 maybe_announced_channel: true,
1039         });
1040         hops.push(RouteHop {
1041                 pubkey: nodes[3].node.get_our_node_id(),
1042                 node_features: NodeFeatures::empty(),
1043                 short_channel_id: chan_3.0.contents.short_channel_id,
1044                 channel_features: ChannelFeatures::empty(),
1045                 fee_msat: 0,
1046                 cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32,
1047                 maybe_announced_channel: true,
1048         });
1049         hops.push(RouteHop {
1050                 pubkey: nodes[1].node.get_our_node_id(),
1051                 node_features: nodes[1].node.node_features(),
1052                 short_channel_id: chan_4.0.contents.short_channel_id,
1053                 channel_features: nodes[1].node.channel_features(),
1054                 fee_msat: 1000000,
1055                 cltv_expiry_delta: TEST_FINAL_CLTV,
1056                 maybe_announced_channel: true,
1057         });
1058         hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
1059         hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
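        // Each intermediate hop charges the standard BOLT 7 forwarding fee, computed back-to-front:
        //     fee_msat = fee_base_msat + amt_to_forward * fee_proportional_millionths / 1_000_000
        // which is exactly what the two assignments above evaluate for this manually-built route.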
1060         let payment_preimage_1 = send_along_route(&nodes[1],
1061                 Route { paths: vec![Path { hops, blinded_tail: None }], route_params: None },
1062                         &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
1063
1064         let mut hops = Vec::with_capacity(3);
1065         hops.push(RouteHop {
1066                 pubkey: nodes[3].node.get_our_node_id(),
1067                 node_features: NodeFeatures::empty(),
1068                 short_channel_id: chan_4.0.contents.short_channel_id,
1069                 channel_features: ChannelFeatures::empty(),
1070                 fee_msat: 0,
1071                 cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32,
1072                 maybe_announced_channel: true,
1073         });
1074         hops.push(RouteHop {
1075                 pubkey: nodes[2].node.get_our_node_id(),
1076                 node_features: NodeFeatures::empty(),
1077                 short_channel_id: chan_3.0.contents.short_channel_id,
1078                 channel_features: ChannelFeatures::empty(),
1079                 fee_msat: 0,
1080                 cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32,
1081                 maybe_announced_channel: true,
1082         });
1083         hops.push(RouteHop {
1084                 pubkey: nodes[1].node.get_our_node_id(),
1085                 node_features: nodes[1].node.node_features(),
1086                 short_channel_id: chan_2.0.contents.short_channel_id,
1087                 channel_features: nodes[1].node.channel_features(),
1088                 fee_msat: 1000000,
1089                 cltv_expiry_delta: TEST_FINAL_CLTV,
1090                 maybe_announced_channel: true,
1091         });
1092         hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
1093         hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
1094         let payment_hash_2 = send_along_route(&nodes[1],
1095                 Route { paths: vec![Path { hops, blinded_tail: None }], route_params: None },
1096                         &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;
1097
1098         // Claim the rebalances...
1099         fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
1100         claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
1101
1102         // Close down the channels...
1103         close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
1104         check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
1105         check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
1106         close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
1107         check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
1108         check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
1109         close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
1110         check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
1111         check_closed_event!(nodes[3], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
1112         close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
1113         check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
1114         check_closed_event!(nodes[3], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
1115 }
1116
1117 #[test]
1118 fn holding_cell_htlc_counting() {
1119         // Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs
1120         // to ensure we don't end up with HTLCs sitting around in our holding cell for several
1121         // commitment dance rounds.
1122         let chanmon_cfgs = create_chanmon_cfgs(3);
1123         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1124         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1125         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1126         create_announced_chan_between_nodes(&nodes, 0, 1);
1127         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
1128
1129         // Fetch a route in advance, as we won't be able to fetch one once we're unable to send.
1130         let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
1131
1132         let mut payments = Vec::new();
1133         for _ in 0..50 {
1134                 let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
1135                 nodes[1].node.send_payment_with_route(&route, payment_hash,
1136                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
1137                 payments.push((payment_preimage, payment_hash));
1138         }
1139         check_added_monitors!(nodes[1], 1);
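        // Note that only the first send triggered a monitor update: that HTLC went straight into a
        // commitment transaction, while the remaining 49 were parked in the holding cell awaiting
        // nodes[2]'s revoke_and_ack.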
1140
1141         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
1142         assert_eq!(events.len(), 1);
1143         let initial_payment_event = SendEvent::from_event(events.pop().unwrap());
1144         assert_eq!(initial_payment_event.node_id, nodes[2].node.get_our_node_id());
1145
1146         // There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs in
1147         // the holding cell waiting on nodes[2]'s RAA before they can be sent. At this point we should
1148         // not be able to add another HTLC.
1149         {
1150                 unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, payment_hash_1,
1151                                 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
1152                         ), true, APIError::ChannelUnavailable { .. }, {});
1153                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1154         }
1155
1156         // This should also be true if we try to forward a payment.
1157         let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
1158         {
1159                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1160                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1161                 check_added_monitors!(nodes[0], 1);
1162         }
1163
1164         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1165         assert_eq!(events.len(), 1);
1166         let payment_event = SendEvent::from_event(events.pop().unwrap());
1167         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1168
1169         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1170         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
1171         // We have to process pending HTLC forwards twice - the first pass attempts to forward the
1172         // payment (and fails), the second processes the resulting failure and fails the HTLC backward.
1173         expect_pending_htlcs_forwardable!(nodes[1]);
1174         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
1175         check_added_monitors!(nodes[1], 1);
1176
1177         let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1178         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]);
1179         commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true);
1180
1181         expect_payment_failed_with_update!(nodes[0], payment_hash_2, false, chan_2.0.contents.short_channel_id, false);
1182
1183         // Now forward all the pending HTLCs and claim them back
1184         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &initial_payment_event.msgs[0]);
1185         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &initial_payment_event.commitment_msg);
1186         check_added_monitors!(nodes[2], 1);
1187
1188         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1189         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
1190         check_added_monitors!(nodes[1], 1);
1191         let as_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1192
1193         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
1194         check_added_monitors!(nodes[1], 1);
1195         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1196
1197         for update in as_updates.update_add_htlcs.iter() {
1198                 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update);
1199         }
1200         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_updates.commitment_signed);
1201         check_added_monitors!(nodes[2], 1);
1202         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
1203         check_added_monitors!(nodes[2], 1);
1204         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1205
1206         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
1207         check_added_monitors!(nodes[1], 1);
1208         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
1209         check_added_monitors!(nodes[1], 1);
1210         let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1211
1212         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_final_raa);
1213         check_added_monitors!(nodes[2], 1);
1214
1215         expect_pending_htlcs_forwardable!(nodes[2]);
1216
1217         let events = nodes[2].node.get_and_clear_pending_events();
1218         assert_eq!(events.len(), payments.len());
1219         for (event, &(_, ref hash)) in events.iter().zip(payments.iter()) {
1220                 match event {
1221                         &Event::PaymentClaimable { ref payment_hash, .. } => {
1222                                 assert_eq!(*payment_hash, *hash);
1223                         },
1224                         _ => panic!("Unexpected event"),
1225                 };
1226         }
1227
1228         for (preimage, _) in payments.drain(..) {
1229                 claim_payment(&nodes[1], &[&nodes[2]], preimage);
1230         }
1231
1232         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
1233 }
1234
1235 #[test]
1236 fn duplicate_htlc_test() {
1237         // Test that we accept duplicate payment_hash HTLCs across the network and that
1238         // claiming/failing each one is handled separately and doesn't affect the others
1239         let chanmon_cfgs = create_chanmon_cfgs(6);
1240         let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
1241         let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
1242         let mut nodes = create_network(6, &node_cfgs, &node_chanmgrs);
1243
1244         // Create some initial channels to route via 3 to 4/5 from 0/1/2
1245         create_announced_chan_between_nodes(&nodes, 0, 3);
1246         create_announced_chan_between_nodes(&nodes, 1, 3);
1247         create_announced_chan_between_nodes(&nodes, 2, 3);
1248         create_announced_chan_between_nodes(&nodes, 3, 4);
1249         create_announced_chan_between_nodes(&nodes, 3, 5);
1250
1251         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
1252
1253         *nodes[0].network_payment_count.borrow_mut() -= 1;
1254         assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage);
1255
1256         *nodes[0].network_payment_count.borrow_mut() -= 1;
1257         assert_eq!(route_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], 1000000).0, payment_preimage);
1258
1259         claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage);
1260         fail_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], payment_hash);
1261         claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage);
1262 }
1263
1264 #[test]
1265 fn test_duplicate_htlc_different_direction_onchain() {
1266         // Test that ChannelMonitor doesn't generate 2 preimage txn
1267         // when we have 2 HTLCs with the same preimage that go across a node
1268         // in opposite directions, even with the same payment secret.
1269         let chanmon_cfgs = create_chanmon_cfgs(2);
1270         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1271         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1272         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1273
1274         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1275
1276         // balancing
1277         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
1278
1279         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);
1280
1281         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], 800_000);
1282         let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
1283         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], 800_000, payment_hash, node_a_payment_secret);
1284
1285         // Provide preimage to node 0 by claiming payment
1286         nodes[0].node.claim_funds(payment_preimage);
1287         expect_payment_claimed!(nodes[0], payment_hash, 800_000);
1288         check_added_monitors!(nodes[0], 1);
1289
1290         // Broadcast node 1 commitment txn
1291         let remote_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
1292
1293         assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound
1294         let mut has_both_htlcs = 0; // check that both HTLCs match the ones committed
1295         for outp in remote_txn[0].output.iter() {
1296                 if outp.value == 800_000 / 1000 {
1297                         has_both_htlcs += 1;
1298                 } else if outp.value == 900_000 / 1000 {
1299                         has_both_htlcs += 1;
1300                 }
1301         }
1302         assert_eq!(has_both_htlcs, 2);
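        // (On-chain output values are denominated in sats, hence the `/ 1000` above when matching
        // the 800_000 and 900_000 msat HTLC amounts against the commitment outputs.)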
1303
1304         mine_transaction(&nodes[0], &remote_txn[0]);
1305         check_added_monitors!(nodes[0], 1);
1306         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
1307         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
1308
1309         let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
1310         assert_eq!(claim_txn.len(), 3);
1311
1312         check_spends!(claim_txn[0], remote_txn[0]); // Immediate HTLC claim with preimage
1313         check_spends!(claim_txn[1], remote_txn[0]);
1314         check_spends!(claim_txn[2], remote_txn[0]);
1315         let preimage_tx = &claim_txn[0];
1316         let (preimage_bump_tx, timeout_tx) = if claim_txn[1].input[0].previous_output == preimage_tx.input[0].previous_output {
1317                 (&claim_txn[1], &claim_txn[2])
1318         } else {
1319                 (&claim_txn[2], &claim_txn[1])
1320         };
1321
1322         assert_eq!(preimage_tx.input.len(), 1);
1323         assert_eq!(preimage_bump_tx.input.len(), 1);
1324
1326         assert_eq!(preimage_tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx
1327         assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value, 800);
1328
1329         assert_eq!(timeout_tx.input.len(), 1);
1330         assert_eq!(timeout_tx.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx
1331         check_spends!(timeout_tx, remote_txn[0]);
1332         assert_eq!(remote_txn[0].output[timeout_tx.input[0].previous_output.vout as usize].value, 900);
1333
1334         let events = nodes[0].node.get_and_clear_pending_msg_events();
1335         assert_eq!(events.len(), 3);
1336         for e in events {
1337                 match e {
1338                         MessageSendEvent::BroadcastChannelUpdate { .. } => {},
1339                         MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::DisconnectPeer { ref msg } } => {
1340                                 assert_eq!(node_id, nodes[1].node.get_our_node_id());
1341                                 assert_eq!(msg.as_ref().unwrap().data, "Channel closed because commitment or closing transaction was confirmed on chain.");
1342                         },
1343                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
1344                                 assert!(update_add_htlcs.is_empty());
1345                                 assert!(update_fail_htlcs.is_empty());
1346                                 assert_eq!(update_fulfill_htlcs.len(), 1);
1347                                 assert!(update_fail_malformed_htlcs.is_empty());
1348                                 assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
1349                         },
1350                         _ => panic!("Unexpected event"),
1351                 }
1352         }
1353 }
1354
1355 #[test]
1356 fn test_basic_channel_reserve() {
1357         let chanmon_cfgs = create_chanmon_cfgs(2);
1358         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1359         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1360         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1361         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1362
1363         let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
1364         let channel_reserve = chan_stat.channel_reserve_msat;
1365
1366         // The 2* and +1 are for the fee spike reserve.
1367         let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, &get_channel_type_features!(nodes[0], nodes[1], chan.2));
1368         let max_can_send = 5000000 - channel_reserve - commit_tx_fee;
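        // A quick sketch of the arithmetic: nodes[0] starts with 100_000 * 1_000 - 95_000_000 =
        // 5_000_000 msat locally; subtracting the counterparty-selected reserve and the (fee spike
        // buffered) single-HTLC commitment fee leaves the largest HTLC we can send.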
1369         let (mut route, our_payment_hash, _, our_payment_secret) =
1370                 get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
1371         route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
1372         let err = nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1373                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).err().unwrap();
1374         match err {
1375                 PaymentSendFailure::AllFailedResendSafe(ref fails) => {
1376                         if let &APIError::ChannelUnavailable { .. } = &fails[0] {}
1377                         else { panic!("Unexpected error variant"); }
1378                 },
1379                 _ => panic!("Unexpected error variant"),
1380         }
1381         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1382
1383         send_payment(&nodes[0], &vec![&nodes[1]], max_can_send);
1384 }
1385
1386 #[test]
1387 fn test_fee_spike_violation_fails_htlc() {
1388         let chanmon_cfgs = create_chanmon_cfgs(2);
1389         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1390         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1391         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1392         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1393
1394         let (mut route, payment_hash, _, payment_secret) =
1395                 get_route_and_payment_hash!(nodes[0], nodes[1], 3460000);
1396         route.paths[0].hops[0].fee_msat += 1;
1397         // Need to manually create the update_add_htlc message to bypass the channel reserve check in send_htlc()
1398         let secp_ctx = Secp256k1::new();
1399         let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!");
1400
1401         let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
1402
1403         let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
1404         let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
1405                 3460001, RecipientOnionFields::secret_only(payment_secret), cur_height, &None).unwrap();
1406         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
1407         let msg = msgs::UpdateAddHTLC {
1408                 channel_id: chan.2,
1409                 htlc_id: 0,
1410                 amount_msat: htlc_msat,
1411                 payment_hash,
1412                 cltv_expiry: htlc_cltv,
1413                 onion_routing_packet: onion_packet,
1414                 skimmed_fee_msat: None,
1415                 blinding_point: None,
1416         };
1417
1418         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
1419
1420         // Now manually create the commitment_signed message corresponding to the update_add
1421         // nodes[0] just sent. In the code for construction of this message, "local" refers
1422         // to the sender of the message, and "remote" refers to the receiver.
1423
1424         let feerate_per_kw = get_feerate!(nodes[0], nodes[1], chan.2);
1425
1426         const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
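        // Commitment numbers are 48 bits and LDK counts them *down* from 2^48 - 1, so
        // `INITIAL_COMMITMENT_NUMBER - 1` below refers to the state after one commitment update.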
1427
1428         // Get the TestChannelSigner for each channel, which will be used to (1) get the keys
1429         // needed to sign the new commitment tx and (2) sign the new commitment tx.
1430         let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = {
1431                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
1432                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
1433                 let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
1434                         |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
1435                 ).flatten().unwrap();
1436                 let chan_signer = local_chan.get_signer();
1437                 // Make the signer believe we validated another commitment, so we can release the secret
1438                 chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
1439
1440                 let pubkeys = chan_signer.as_ref().pubkeys();
1441                 (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
1442                  chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER),
1443                  chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx),
1444                  chan_signer.as_ref().pubkeys().funding_pubkey)
1445         };
1446         let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
1447                 let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
1448                 let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
1449                 let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
1450                         |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
1451                 ).flatten().unwrap();
1452                 let chan_signer = remote_chan.get_signer();
1453                 let pubkeys = chan_signer.as_ref().pubkeys();
1454                 (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
1455                  chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
1456                  chan_signer.as_ref().pubkeys().funding_pubkey)
1457         };
1458
1459         // Assemble the set of keys we can use for signatures for our commitment_signed message.
1460         let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
1461                 &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);
1462
1463         // Build the remote commitment transaction so we can sign it, and then later use the
1464         // signature for the commitment_signed message.
1465         let local_chan_balance = 1313;
1466
1467         let accepted_htlc_info = chan_utils::HTLCOutputInCommitment {
1468                 offered: false,
1469                 amount_msat: 3460001,
1470                 cltv_expiry: htlc_cltv,
1471                 payment_hash,
1472                 transaction_output_index: Some(1),
1473         };
1474
1475         let commitment_number = INITIAL_COMMITMENT_NUMBER - 1;
1476
1477         let res = {
1478                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
1479                 let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
1480                 let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
1481                         |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
1482                 ).flatten().unwrap();
1483                 let local_chan_signer = local_chan.get_signer();
1484                 let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
1485                         commitment_number,
1486                         95000,
1487                         local_chan_balance,
1488                         local_funding, remote_funding,
1489                         commit_tx_keys.clone(),
1490                         feerate_per_kw,
1491                         &mut vec![(accepted_htlc_info, ())],
1492                         &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
1493                 );
1494                 local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), Vec::new(), &secp_ctx).unwrap()
1495         };
1496
1497         let commit_signed_msg = msgs::CommitmentSigned {
1498                 channel_id: chan.2,
1499                 signature: res.0,
1500                 htlc_signatures: res.1,
1501                 #[cfg(taproot)]
1502                 partial_signature_with_nonce: None,
1503         };
1504
1505         // Send the commitment_signed message to nodes[1].
1506         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
1507         let _ = nodes[1].node.get_and_clear_pending_msg_events();
1508
1509         // Send the RAA to nodes[1].
1510         let raa_msg = msgs::RevokeAndACK {
1511                 channel_id: chan.2,
1512                 per_commitment_secret: local_secret,
1513                 next_per_commitment_point: next_local_point,
1514                 #[cfg(taproot)]
1515                 next_local_nonce: None,
1516         };
1517         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_msg);
1518
1519         let events = nodes[1].node.get_and_clear_pending_msg_events();
1520         assert_eq!(events.len(), 1);
1521         // Make sure the HTLC failed in the way we expect.
1522         match events[0] {
1523                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, .. } => {
1524                         assert_eq!(update_fail_htlcs.len(), 1);
1525                         update_fail_htlcs[0].clone()
1526                 },
1527                 _ => panic!("Unexpected event"),
1528         };
1529         nodes[1].logger.assert_log("lightning::ln::channel",
1530                 format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1);
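        // The fee spike buffer requires the funder to afford the commitment fee at
        // FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE (2x) the current feerate with one extra HTLC
        // slot; since the HTLC above was sized right up against nodes[0]'s balance, accepting it
        // would dip into the reserve, so nodes[1] fails it back rather than force-closing.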
1531
1532         check_added_monitors!(nodes[1], 2);
1533 }
1534
1535 #[test]
1536 fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
1537         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1538         // Set the channel balances (via push_msat below) such that the fundee sending any
1539         // above-dust amount would result in a channel reserve violation on the funder's side.
1540         // In this test we check that we would be prevented from sending an HTLC in
1541         // this situation.
1542         let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1543         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1544         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1545         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1546         let default_config = UserConfig::default();
1547         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1548
1549         let mut push_amt = 100_000_000;
1550         push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1551
1552         push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
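        // push_amt now leaves nodes[0] holding exactly its channel reserve plus the fee budget for
        // MIN_AFFORDABLE_HTLC_COUNT above-dust HTLCs; everything else sits on nodes[1]'s side.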
1553
1554         let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);
1555
1556         // Fetch a route in advance, as we won't be able to fetch one once we're unable to send.
1557         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000);
1558         // Sending exactly enough to hit the reserve amount should be accepted
1559         for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
1560                 route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1561         }
1562
1563         // However, one more HTLC should put us over the reserve amount, and should therefore fail.
1564         unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
1565                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1566                 ), true, APIError::ChannelUnavailable { .. }, {});
1567         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1568 }
1569
1570 #[test]
1571 fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
1572         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1573         let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1574         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1575         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1576         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1577         let default_config = UserConfig::default();
1578         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1579
1580         // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
1581         // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
1582         // transaction fee with 0 HTLCs (183 sats)).
1583         let mut push_amt = 100_000_000;
1584         push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1585         push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1586         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);
1587
1588         // Send four HTLCs to cover the initial push_msat buffer we're required to include
1589         for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
1590                 route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1591         }
1592
1593         let (mut route, payment_hash, _, payment_secret) =
1594                 get_route_and_payment_hash!(nodes[1], nodes[0], 1000);
1595         route.paths[0].hops[0].fee_msat = 700_000;
1596         // Need to manually create the update_add_htlc message to bypass the channel reserve check in send_htlc()
1597         let secp_ctx = Secp256k1::new();
1598         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
1599         let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
1600         let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
1601         let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
1602                 700_000, RecipientOnionFields::secret_only(payment_secret), cur_height, &None).unwrap();
1603         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
1604         let msg = msgs::UpdateAddHTLC {
1605                 channel_id: chan.2,
1606                 htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64,
1607                 amount_msat: htlc_msat,
1608                 payment_hash,
1609                 cltv_expiry: htlc_cltv,
1610                 onion_routing_packet: onion_packet,
1611                 skimmed_fee_msat: None,
1612                 blinding_point: None,
1613         };
1614
1615         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
1616         // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
1617         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value", 3);
1618         assert_eq!(nodes[0].node.list_channels().len(), 0);
1619         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
1620         assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
1621         check_added_monitors!(nodes[0], 1);
1622         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() },
1623                 [nodes[1].node.get_our_node_id()], 100000);
1624 }
1625
1626 #[test]
1627 fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
1628         // Test that if we receive many dust HTLCs over an outbound channel, they don't count when
1629         // calculating our commitment transaction fee (this was previously broken).
1630         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1631         let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1632
1633         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1634         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1635         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1636         let default_config = UserConfig::default();
1637         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1638
1639         // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
1640         // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
1641         // transaction fee with 0 HTLCs (183 sats)).
1642         let mut push_amt = 100_000_000;
1643         push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1644         push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1645         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt);
1646
1647         let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000
1648                 + feerate_per_kw as u64 * htlc_success_tx_weight(&channel_type_features) / 1000 * 1000 - 1;
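        // An HTLC received by nodes[0] is "dust" (gets no commitment output) when its value, less
        // the fee of the HTLC-success transaction that would claim it, is at or below the dust
        // limit; `dust_amt` above sits exactly 1 msat below that threshold, so it is still dust.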
1649         // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
1650         // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
1651         // commitment transaction fee.
1652         route_payment(&nodes[1], &[&nodes[0]], dust_amt);
1653
1654         // Send four HTLCs to cover the initial push_msat buffer we're required to include
1655         for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
1656                 route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1657         }
1658
1659         // One more than the dust amt should fail, however.
1660         let (mut route, our_payment_hash, _, our_payment_secret) =
1661                 get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt);
1662         route.paths[0].hops[0].fee_msat += 1;
1663         unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
1664                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1665                 ), true, APIError::ChannelUnavailable { .. }, {});
1666 }
1667
1668 #[test]
1669 fn test_chan_init_feerate_unaffordability() {
1670         // Test that we will reject channel opens which do not leave enough to pay for any HTLCs due to
1671         // channel reserve and feerate requirements.
1672         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1673         let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1674         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1675         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1676         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1677         let default_config = UserConfig::default();
1678         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1679
1680         // Set the push_msat amount such that nodes[0] will not be able to afford to add even a single
1681         // HTLC.
1682         let mut push_amt = 100_000_000;
1683         push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1684         assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None, None).unwrap_err(),
1685                 APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() });
1686
1687         // During open, we don't have a "counterparty channel reserve" to check against, so that
1688         // requirement only comes into play on the open_channel handling side.
1689         push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1690         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None, None).unwrap();
1691         let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
1692         open_channel_msg.push_msat += 1;
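        // Bumping push_msat by one msat makes the implied funder balance fall just short of the
        // reserve nodes[1] requires, so nodes[1] must reject the open.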
1693         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
1694
1695         let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
1696         assert_eq!(msg_events.len(), 1);
1697         match msg_events[0] {
1698                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
1699                         assert_eq!(msg.data, "Insufficient funding amount for initial reserve");
1700                 },
1701                 _ => panic!("Unexpected event"),
1702         }
1703 }
1704
1705 #[test]
1706 fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
1707         // Test that if we receive many dust HTLCs over an inbound channel, they don't count when
1708         // calculating our counterparty's commitment transaction fee (this was previously broken).
1709         let chanmon_cfgs = create_chanmon_cfgs(2);
1710         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1711         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1712         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1713         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000);
1714
1715         let payment_amt = 46000; // Dust amount
1716         // In the previous code, these first four payments would succeed.
1717         route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1718         route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1719         route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1720         route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1721
1722         // Then these next 5 would be interpreted by nodes[1] as violating the fee spike buffer.
1723         route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1724         route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1725         route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1726         route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1727         route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1728
1729         // And this last payment previously resulted in nodes[1] closing on its inbound-channel
1730         // counterparty, because it counted all the previous dust HTLCs against nodes[0]'s commitment
1731         // transaction fee and therefore perceived this next payment as a channel reserve violation.
1732         route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1733 }
1734
1735 #[test]
1736 fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
1737         let chanmon_cfgs = create_chanmon_cfgs(3);
1738         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1739         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1740         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1741         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1742         let _ = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);
1743
1744         let feemsat = 239;
1745         let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
1746         let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
1747         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
1748         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
1749
1750         // The 2* and +1 are for the fee spike reserve.
1751         let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
1752         let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc)/2;
1753         let amt_msat_1 = recv_value_1 + total_routing_fee_msat;
1754
1755         // Add a pending HTLC.
1756         let (route_1, our_payment_hash_1, _, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_1);
1757         let payment_event_1 = {
1758                 nodes[0].node.send_payment_with_route(&route_1, our_payment_hash_1,
1759                         RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
1760                 check_added_monitors!(nodes[0], 1);
1761
1762                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1763                 assert_eq!(events.len(), 1);
1764                 SendEvent::from_event(events.remove(0))
1765         };
1766         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
1767
1768         // Attempt to trigger a channel reserve violation --> payment failure.
1769         let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, &channel_type_features);
1770         let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1;
1771         let amt_msat_2 = recv_value_2 + total_routing_fee_msat;
1772         let mut route_2 = route_1.clone();
1773         route_2.paths[0].hops.last_mut().unwrap().fee_msat = amt_msat_2;
1774
1775         // Need to manually create the update_add_htlc message to bypass the channel reserve check in send_htlc()
1776         let secp_ctx = Secp256k1::new();
1777         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
1778         let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
1779         let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap();
1780         let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
1781                 &route_2.paths[0], recv_value_2, RecipientOnionFields::spontaneous_empty(), cur_height, &None).unwrap();
1782         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1).unwrap();
1783         let msg = msgs::UpdateAddHTLC {
1784                 channel_id: chan.2,
1785                 htlc_id: 1,
1786                 amount_msat: htlc_msat + 1,
1787                 payment_hash: our_payment_hash_1,
1788                 cltv_expiry: htlc_cltv,
1789                 onion_routing_packet: onion_packet,
1790                 skimmed_fee_msat: None,
1791                 blinding_point: None,
1792         };
1793
1794         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
1795         // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
1796         nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value", 3);
1797         assert_eq!(nodes[1].node.list_channels().len(), 1);
1798         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
1799         assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
1800         check_added_monitors!(nodes[1], 1);
1801         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() },
1802                 [nodes[0].node.get_our_node_id()], 100000);
1803 }
1804
1805 #[test]
1806 fn test_inbound_outbound_capacity_is_not_zero() {
1807         let chanmon_cfgs = create_chanmon_cfgs(2);
1808         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1809         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1810         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1811         let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1812         let channels0 = node_chanmgrs[0].list_channels();
1813         let channels1 = node_chanmgrs[1].list_channels();
1814         let default_config = UserConfig::default();
1815         assert_eq!(channels0.len(), 1);
1816         assert_eq!(channels1.len(), 1);
1817
1818         let reserve = get_holder_selected_channel_reserve_satoshis(100_000, &default_config);
1819         assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve*1000);
1820         assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve*1000);
1821
1822         assert_eq!(channels0[0].outbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
1823         assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
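        // In other words, usable capacity in each direction is the holder's current balance minus
        // the reserve the counterparty required them to maintain.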
1824 }
1825
1826 fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, channel_type_features: &ChannelTypeFeatures) -> u64 {
1827         (commitment_tx_base_weight(channel_type_features) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
1828 }
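// A minimal sanity-check sketch of the helper above, not relied on by the tests that
// follow. It assumes the non-anchors commitment weights (724 WU base, 172 WU per HTLC);
// if those constants differ, the expected value below would too.
#[test]
fn commit_tx_fee_msat_floors_to_whole_sats() {
        let features = ChannelTypeFeatures::only_static_remote_key();
        // (724 + 172) WU * 253 sat/kWU = 226_688, floored to 226 sat by the
        // `/ 1000 * 1000` round-trip, i.e. 226_000 msat.
        assert_eq!(commit_tx_fee_msat(253, 1, &features), 226_000);
}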
1829
1830 #[test]
1831 fn test_channel_reserve_holding_cell_htlcs() {
1832         let chanmon_cfgs = create_chanmon_cfgs(3);
1833         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1834         // When this test was written, the default base fee floated based on the HTLC count.
1835         // It is now fixed, so we simply set the fee to the expected value here.
1836         let mut config = test_default_channel_config();
1837         config.channel_config.forwarding_fee_base_msat = 239;
1838         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
1839         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1840         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001);
1841         let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 190000, 1001);
1842
1843         let mut stat01 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
1844         let mut stat11 = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
1845
1846         let mut stat12 = get_channel_value_stat!(nodes[1], nodes[2], chan_2.2);
1847         let mut stat22 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2);
1848
1849         macro_rules! expect_forward {
1850                 ($node: expr) => {{
1851                         let mut events = $node.node.get_and_clear_pending_msg_events();
1852                         assert_eq!(events.len(), 1);
1853                         check_added_monitors!($node, 1);
1854                         let payment_event = SendEvent::from_event(events.remove(0));
1855                         payment_event
1856                 }}
1857         }
1858
1859         let feemsat = 239; // set above
1860         let total_fee_msat = (nodes.len() - 2) as u64 * feemsat;
1861         let feerate = get_feerate!(nodes[0], nodes[1], chan_1.2);
1862         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_1.2);
1863
1864         let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat;
1865
1866         // attempt to send amt_msat > their_max_htlc_value_in_flight_msat
1867         {
1868                 let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
1869                         .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
1870                 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0);
1871                 route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
1872                 assert!(route.paths[0].hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
1873
1874                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1875                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1876                         ), true, APIError::ChannelUnavailable { .. }, {});
1877                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1878         }
1879
1880         // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete
1881         // nodes[0]'s wealth
1882         loop {
1883                 let amt_msat = recv_value_0 + total_fee_msat;
1884                 // 3 for the 3 HTLCs that will be sent; the 2* and +1 give the fee spike reserve (twice the commit tx fee, computed with one extra HTLC slot).
1885                 // Also, ensure that each payment has enough to be over the dust limit to
1886                 // ensure it'll be included in each commit tx fee calculation.
1887                 let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
1888                 let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000);
1889                 if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat {
1890                         break;
1891                 }
1892
1893                 let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
1894                         .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
1895                 let route = get_route!(nodes[0], payment_params, recv_value_0).unwrap();
1896                 let (payment_preimage, ..) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0);
1897                 claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
1898
1899                 let (stat01_, stat11_, stat12_, stat22_) = (
1900                         get_channel_value_stat!(nodes[0], nodes[1], chan_1.2),
1901                         get_channel_value_stat!(nodes[1], nodes[0], chan_1.2),
1902                         get_channel_value_stat!(nodes[1], nodes[2], chan_2.2),
1903                         get_channel_value_stat!(nodes[2], nodes[1], chan_2.2),
1904                 );
1905
1906                 assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
1907                 assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
1908                 assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
1909                 assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
1910                 stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
1911         }
1912
1913         // Add a pending output.
1914         // The 2* and +1 on the commit tx fee account for the fee spike reserve.
1915         // The reason we're dividing by two here is as follows: the dividend is the total outbound liquidity
1916         // after fees, the channel reserve, and the fee spike buffer are removed. We eventually want to
1917         // divide this quantity into 3 portions, each of which will be sent in an HTLC. This allows us
1918         // to test channel reserve policy at the edges of what amount is sendable, i.e.
1919         // cases where 1 msat over X amount will cause a payment failure, but anything less than
1920         // that can be sent successfully. So, dividing by two is a somewhat arbitrary way of getting
1921         // the amount of the first of these aforementioned 3 payments. The reason we split into 3 payments
1922         // is to test the behavior of the holding cell with respect to channel reserve and commit tx fee
1923         // policy.
1924         let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
1925         let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2;
1926         let amt_msat_1 = recv_value_1 + total_fee_msat;
1927
1928         let (route_1, our_payment_hash_1, our_payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_1);
1929         let payment_event_1 = {
1930                 nodes[0].node.send_payment_with_route(&route_1, our_payment_hash_1,
1931                         RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
1932                 check_added_monitors!(nodes[0], 1);
1933
1934                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1935                 assert_eq!(events.len(), 1);
1936                 SendEvent::from_event(events.remove(0))
1937         };
1938         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
1939
1940         // channel reserve test with htlc pending output > 0
1941         let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs;
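        // recv_value_2 is the exact amount still spendable with the first HTLC in
        // flight, so bumping it by 1 msat below should fail the sender-side reserve check.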
1942         {
1943                 let mut route = route_1.clone();
1944                 route.paths[0].hops.last_mut().unwrap().fee_msat = recv_value_2 + 1;
1945                 let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
1946                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1947                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1948                         ), true, APIError::ChannelUnavailable { .. }, {});
1949                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1950         }
1951
1952         // split the rest to test holding cell
1953         let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
1954         let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs;
1955         let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2;
1956         let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat;
1957         {
1958                 let stat = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
1959                 assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat + commit_tx_fee_3_htlcs), stat.channel_reserve_msat);
1960         }
1961
1962         // now see if they go through on both sides
1963         let (route_21, our_payment_hash_21, our_payment_preimage_21, our_payment_secret_21) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_21);
1964         // but this one will be stuck in the holding cell
1965         nodes[0].node.send_payment_with_route(&route_21, our_payment_hash_21,
1966                 RecipientOnionFields::secret_only(our_payment_secret_21), PaymentId(our_payment_hash_21.0)).unwrap();
1967         check_added_monitors!(nodes[0], 0);
1968         let events = nodes[0].node.get_and_clear_pending_events();
1969         assert_eq!(events.len(), 0);
1970
1971         // test with outbound holding cell amount > 0
1972         {
1973                 let (mut route, our_payment_hash, _, our_payment_secret) =
1974                         get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
1975                 route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
1976                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1977                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1978                         ), true, APIError::ChannelUnavailable { .. }, {});
1979                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1980         }
1981
1982         let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
1983         // this one will also be stuck in the holding cell
1984         nodes[0].node.send_payment_with_route(&route_22, our_payment_hash_22,
1985                 RecipientOnionFields::secret_only(our_payment_secret_22), PaymentId(our_payment_hash_22.0)).unwrap();
1986         check_added_monitors!(nodes[0], 0);
1987         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
1988         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1989
1990         // flush the pending htlc
1991         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg);
1992         let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1993         check_added_monitors!(nodes[1], 1);
1994
1995         // the pending htlc should be promoted to committed
1996         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
1997         check_added_monitors!(nodes[0], 1);
1998         let commitment_update_2 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1999
2000         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed);
2001         let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2002         // No commitment_signed so get_event_msg's assert(len == 1) passes
2003         check_added_monitors!(nodes[0], 1);
2004
2005         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack);
2006         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2007         check_added_monitors!(nodes[1], 1);
2008
2009         expect_pending_htlcs_forwardable!(nodes[1]);
2010
2011         let ref payment_event_11 = expect_forward!(nodes[1]);
2012         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]);
2013         commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
2014
2015         expect_pending_htlcs_forwardable!(nodes[2]);
2016         expect_payment_claimable!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1);
2017
2018         // flush the htlcs in the holding cell
2019         assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
2020         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]);
2021         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]);
2022         commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
2023         expect_pending_htlcs_forwardable!(nodes[1]);
2024
2025         let ref payment_event_3 = expect_forward!(nodes[1]);
2026         assert_eq!(payment_event_3.msgs.len(), 2);
2027         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]);
2028         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]);
2029
2030         commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
2031         expect_pending_htlcs_forwardable!(nodes[2]);
2032
2033         let events = nodes[2].node.get_and_clear_pending_events();
2034         assert_eq!(events.len(), 2);
2035         match events[0] {
2036                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
2037                         assert_eq!(our_payment_hash_21, *payment_hash);
2038                         assert_eq!(recv_value_21, amount_msat);
2039                         assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
2040                         assert_eq!(via_channel_id, Some(chan_2.2));
2041                         match &purpose {
2042                                 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
2043                                         assert!(payment_preimage.is_none());
2044                                         assert_eq!(our_payment_secret_21, *payment_secret);
2045                                 },
2046                                 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
2047                         }
2048                 },
2049                 _ => panic!("Unexpected event"),
2050         }
2051         match events[1] {
2052                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
2053                         assert_eq!(our_payment_hash_22, *payment_hash);
2054                         assert_eq!(recv_value_22, amount_msat);
2055                         assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
2056                         assert_eq!(via_channel_id, Some(chan_2.2));
2057                         match &purpose {
2058                                 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
2059                                         assert!(payment_preimage.is_none());
2060                                         assert_eq!(our_payment_secret_22, *payment_secret);
2061                                 },
2062                                 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
2063                         }
2064                 },
2065                 _ => panic!("Unexpected event"),
2066         }
2067
2068         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
2069         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
2070         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
2071
2072         let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, &channel_type_features);
2073         let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat;
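        // recv_value_3 is the commit tx fee freed up by going from two pending HTLC
        // outputs back to zero, so (net of routing fees) exactly that much more can be
        // sent; the asserts below confirm nodes[0] ends at reserve + fee spike buffer.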
2074         send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3);
2075
2076         let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
2077         let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat);
2078         let stat0 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
2079         assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
2080         assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat + commit_tx_fee_1_htlc);
2081
2082         let stat2 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2);
2083         assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3);
2084 }
2085
2086 #[test]
2087 fn channel_reserve_in_flight_removes() {
2088         // In cases where one side claims an HTLC, it thinks it has additional available funds that it
2089         // can send to its counterparty, but due to update ordering, the other side may not yet have
2090         // considered those HTLCs fully removed.
2091         // This tests that we don't count HTLCs which will not be included in the next remote
2092         // commitment transaction towards the reserve value (as it implies no commitment transaction
2093         // will be generated which violates the remote reserve value).
2094         // This was broken previously, and discovered by the chanmon_fail_consistency fuzz test.
2095         // To test this we:
2096         //  * route two HTLCs from A to B (note that, at a high level, this test is checking that, when
2097         //    you consider the values of both of these HTLCs, B may not send an HTLC back to A, but if
2098         //    you only consider the value of the first HTLC, it may),
2099         //  * start routing a third HTLC from A to B,
2100         //  * claim the first two HTLCs (though B will generate an update_fulfill for one, and put
2101         //    the other claim in its holding cell, as it immediately goes into AwaitingRAA),
2102         //  * deliver the first fulfill from B
2103         //  * deliver the update_add and an RAA from A, resulting in B freeing the second holding cell
2104         //    claim,
2105         //  * deliver A's response CS and RAA.
2106         //    This results in A having the second HTLC in AwaitingRemovedRemoteRevoke, but B having
2107         //    removed it fully. B now has the push_msat plus the first two HTLCs in value.
2108         //  * Now B happily sends another HTLC, potentially violating its reserve value from A's point
2109         //    of view (if A counts the AwaitingRemovedRemoteRevoke HTLC).
2110         let chanmon_cfgs = create_chanmon_cfgs(2);
2111         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2112         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2113         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2114         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2115
2116         let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
2117         // Route the first two HTLCs.
2118         let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000;
2119         let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1);
2120         let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 20_000);
2121
2122         // Start routing the third HTLC (this is just used to get everyone in the right state).
2123         let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
2124         let send_1 = {
2125                 nodes[0].node.send_payment_with_route(&route, payment_hash_3,
2126                         RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
2127                 check_added_monitors!(nodes[0], 1);
2128                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2129                 assert_eq!(events.len(), 1);
2130                 SendEvent::from_event(events.remove(0))
2131         };
2132
2133         // Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an
2134         // initial fulfill/CS.
2135         nodes[1].node.claim_funds(payment_preimage_1);
2136         expect_payment_claimed!(nodes[1], payment_hash_1, payment_value_1);
2137         check_added_monitors!(nodes[1], 1);
2138         let bs_removes = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2139
2140         // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not
2141         // remove the second HTLC when we send the HTLC back from B to A.
2142         nodes[1].node.claim_funds(payment_preimage_2);
2143         expect_payment_claimed!(nodes[1], payment_hash_2, 20_000);
2144         check_added_monitors!(nodes[1], 1);
2145         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2146
2147         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]);
2148         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed);
2149         check_added_monitors!(nodes[0], 1);
2150         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2151         expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
2152
2153         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]);
2154         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg);
2155         check_added_monitors!(nodes[1], 1);
2156         // B is already AwaitingRAA, so it can't generate a CS here
2157         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2158
2159         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2160         check_added_monitors!(nodes[1], 1);
2161         let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2162
2163         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2164         check_added_monitors!(nodes[0], 1);
2165         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2166
2167         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2168         check_added_monitors!(nodes[1], 1);
2169         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2170
2171         // The second HTLC is removed, but as A is in AwaitingRAA it can't generate a CS here, so the
2172         // RAA that B generated above doesn't fully resolve the second HTLC from A's point of view.
2173         // However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
2174         // can no longer broadcast a commitment transaction with it and B has the preimage so can go
2175         // on-chain as necessary).
2176         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]);
2177         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
2178         check_added_monitors!(nodes[0], 1);
2179         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2180         expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
2181
2182         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2183         check_added_monitors!(nodes[1], 1);
2184         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2185
2186         expect_pending_htlcs_forwardable!(nodes[1]);
2187         expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 100000);
2188
2189         // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
2190         // resolve the second HTLC from A's point of view.
2191         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2192         check_added_monitors!(nodes[0], 1);
2193         expect_payment_path_successful!(nodes[0]);
2194         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2195
2196         // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
2197         // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing.
2198         let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[1], nodes[0], 10000);
2199         let send_2 = {
2200                 nodes[1].node.send_payment_with_route(&route, payment_hash_4,
2201                         RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap();
2202                 check_added_monitors!(nodes[1], 1);
2203                 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
2204                 assert_eq!(events.len(), 1);
2205                 SendEvent::from_event(events.remove(0))
2206         };
2207
2208         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_2.msgs[0]);
2209         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_2.commitment_msg);
2210         check_added_monitors!(nodes[0], 1);
2211         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2212
2213         // Now just resolve all the outstanding messages/HTLCs for completeness...
2214
2215         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2216         check_added_monitors!(nodes[1], 1);
2217         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2218
2219         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2220         check_added_monitors!(nodes[1], 1);
2221
2222         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2223         check_added_monitors!(nodes[0], 1);
2224         expect_payment_path_successful!(nodes[0]);
2225         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2226
2227         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2228         check_added_monitors!(nodes[1], 1);
2229         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2230
2231         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2232         check_added_monitors!(nodes[0], 1);
2233
2234         expect_pending_htlcs_forwardable!(nodes[0]);
2235         expect_payment_claimable!(nodes[0], payment_hash_4, payment_secret_4, 10000);
2236
2237         claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
2238         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
2239 }
2240
2241 #[test]
2242 fn channel_monitor_network_test() {
2243         // Simple test which builds a network of ChannelManagers, connects them to each other, and
2244         // tests that ChannelMonitor is able to recover from various states.
2245         let chanmon_cfgs = create_chanmon_cfgs(5);
2246         let node_cfgs = create_node_cfgs(5, &chanmon_cfgs);
2247         let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, None, None, None, None]);
2248         let nodes = create_network(5, &node_cfgs, &node_chanmgrs);
2249
2250         // Create some initial channels
2251         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2252         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
2253         let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
2254         let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
2255
2256         // Make sure all nodes are at the same starting height
2257         connect_blocks(&nodes[0], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
2258         connect_blocks(&nodes[1], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
2259         connect_blocks(&nodes[2], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
2260         connect_blocks(&nodes[3], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[3].best_block_info().1);
2261         connect_blocks(&nodes[4], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[4].best_block_info().1);
2262
2263         // Rebalance the network a bit by relaying a few payments through all the channels...
2264         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2265         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2266         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2267         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2268
2269         // Simple case with no pending HTLCs:
2270         nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
2271         check_added_monitors!(nodes[1], 1);
2272         check_closed_broadcast!(nodes[1], true);
2273         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
2274         {
2275                 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
2276                 assert_eq!(node_txn.len(), 1);
2277                 mine_transaction(&nodes[1], &node_txn[0]);
2278                 if nodes[1].connect_style.borrow().updates_best_block_first() {
2279                         let _ = nodes[1].tx_broadcaster.txn_broadcast();
2280                 }
2281
2282                 mine_transaction(&nodes[0], &node_txn[0]);
2283                 check_added_monitors!(nodes[0], 1);
2284                 test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE);
2285         }
2286         check_closed_broadcast!(nodes[0], true);
2287         assert_eq!(nodes[0].node.list_channels().len(), 0);
2288         assert_eq!(nodes[1].node.list_channels().len(), 1);
2289         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2290
2291         // One pending HTLC is discarded by the force-close:
2292         let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
2293
2294         // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
2295         // broadcast until we reach the timelock time).
2296         nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
2297         check_closed_broadcast!(nodes[1], true);
2298         check_added_monitors!(nodes[1], 1);
2299         {
2300                 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE);
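                // Advance well past the HTLC's CLTV expiry; the ChannelMonitor should then
                // broadcast an HTLC-Timeout claim against the commitment tx it already broadcast.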
2301                 connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
2302                 test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
2303                 mine_transaction(&nodes[2], &node_txn[0]);
2304                 check_added_monitors!(nodes[2], 1);
2305                 test_txn_broadcast(&nodes[2], &chan_2, Some(node_txn[0].clone()), HTLCType::NONE);
2306         }
2307         check_closed_broadcast!(nodes[2], true);
2308         assert_eq!(nodes[1].node.list_channels().len(), 0);
2309         assert_eq!(nodes[2].node.list_channels().len(), 1);
2310         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
2311         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2312
2313         macro_rules! claim_funds {
2314                 ($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => {
2315                         {
2316                                 $node.node.claim_funds($preimage);
2317                                 expect_payment_claimed!($node, $payment_hash, 3_000_000);
2318                                 check_added_monitors!($node, 1);
2319
2320                                 let events = $node.node.get_and_clear_pending_msg_events();
2321                                 assert_eq!(events.len(), 1);
2322                                 match events[0] {
2323                                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => {
2324                                                 assert!(update_add_htlcs.is_empty());
2325                                                 assert!(update_fail_htlcs.is_empty());
2326                                                 assert_eq!(*node_id, $prev_node.node.get_our_node_id());
2327                                         },
2328                                         _ => panic!("Unexpected event"),
2329                                 };
2330                         }
2331                 }
2332         }
2333
2334         // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
2335         // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
2336         nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
2337         check_added_monitors!(nodes[2], 1);
2338         check_closed_broadcast!(nodes[2], true);
2339         let node2_commitment_txid;
2340         {
2341                 let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
2342                 connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
2343                 test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
2344                 node2_commitment_txid = node_txn[0].txid();
2345
2346                 // Claim the payment on nodes[3], giving it knowledge of the preimage
2347                 claim_funds!(nodes[3], nodes[2], payment_preimage_1, payment_hash_1);
2348                 mine_transaction(&nodes[3], &node_txn[0]);
2349                 check_added_monitors!(nodes[3], 1);
2350                 check_preimage_claim(&nodes[3], &node_txn);
2351         }
2352         check_closed_broadcast!(nodes[3], true);
2353         assert_eq!(nodes[2].node.list_channels().len(), 0);
2354         assert_eq!(nodes[3].node.list_channels().len(), 1);
2355         check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
2356         check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
2357
2358         // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
2359         // confusing us in the following tests.
2360         let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&OutPoint { txid: chan_3.3.txid(), index: 0 });
2361
2362         // One pending HTLC to time out:
2363         let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[3], &[&nodes[4]], 3_000_000);
2364         // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
2365         // buffer space).
2366
2367         let (close_chan_update_1, close_chan_update_2) = {
2368                 connect_blocks(&nodes[3], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
2369                 let events = nodes[3].node.get_and_clear_pending_msg_events();
2370                 assert_eq!(events.len(), 2);
2371                 let close_chan_update_1 = match events[1] {
2372                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2373                                 msg.clone()
2374                         },
2375                         _ => panic!("Unexpected event"),
2376                 };
2377                 match events[0] {
2378                         MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
2379                                 assert_eq!(node_id, nodes[4].node.get_our_node_id());
2380                         },
2381                         _ => panic!("Unexpected event"),
2382                 }
2383                 check_added_monitors!(nodes[3], 1);
2384
2385                 // Clear bumped claiming txn spending node 2's commitment tx. Bumped txn are generated after a height timer is reached.
2386                 {
2387                         let mut node_txn = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap();
2388                         node_txn.retain(|tx| tx.input[0].previous_output.txid != node2_commitment_txid);
2393                 }
2394
2395                 let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT);
2396
2397                 // Claim the payment on nodes[4], giving it knowledge of the preimage
2398                 claim_funds!(nodes[4], nodes[3], payment_preimage_2, payment_hash_2);
2399
2400                 connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
2401                 let events = nodes[4].node.get_and_clear_pending_msg_events();
2402                 assert_eq!(events.len(), 2);
2403                 let close_chan_update_2 = match events[1] {
2404                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2405                                 msg.clone()
2406                         },
2407                         _ => panic!("Unexpected event"),
2408                 };
2409                 match events[0] {
2410                         MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
2411                                 assert_eq!(node_id, nodes[3].node.get_our_node_id());
2412                         },
2413                         _ => panic!("Unexpected event"),
2414                 }
2415                 check_added_monitors!(nodes[4], 1);
2416                 test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
2417                 check_closed_event!(nodes[4], 1, ClosureReason::HTLCsTimedOut, [nodes[3].node.get_our_node_id()], 100000);
2418
2419                 mine_transaction(&nodes[4], &node_txn[0]);
2420                 check_preimage_claim(&nodes[4], &node_txn);
2421                 (close_chan_update_1, close_chan_update_2)
2422         };
2423         nodes[3].gossip_sync.handle_channel_update(&close_chan_update_2).unwrap();
2424         nodes[4].gossip_sync.handle_channel_update(&close_chan_update_1).unwrap();
2425         assert_eq!(nodes[3].node.list_channels().len(), 0);
2426         assert_eq!(nodes[4].node.list_channels().len(), 0);
2427
2428         assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
2429                 Ok(ChannelMonitorUpdateStatus::Completed));
2430         check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [nodes[4].node.get_our_node_id()], 100000);
2431 }
2432
2433 #[test]
2434 fn test_justice_tx_htlc_timeout() {
2435         // Test justice txn built on revoked HTLC-Timeout tx, against both sides
2436         let mut alice_config = test_default_channel_config();
2437         alice_config.channel_handshake_config.announced_channel = true;
2438         alice_config.channel_handshake_limits.force_announced_channel_preference = false;
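        // our_to_self_delay is denominated in blocks: at ~144 blocks/day this is about
        // five days for Alice and (below) three for Bob.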
2439         alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
2440         let mut bob_config = test_default_channel_config();
2441         bob_config.channel_handshake_config.announced_channel = true;
2442         bob_config.channel_handshake_limits.force_announced_channel_preference = false;
2443         bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
2444         let user_cfgs = [Some(alice_config), Some(bob_config)];
2445         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2446         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2447         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
2448         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2449         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
2450         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2451         // Create some new channels:
2452         let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1);
2453
2454         // A pending HTLC which will be revoked:
2455         let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2456         // Get the will-be-revoked local txn from nodes[0]
2457         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_5.2);
2458         assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
2459         assert_eq!(revoked_local_txn[0].input.len(), 1);
2460         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
2461         assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to nodes[0] are present
2462         assert_eq!(revoked_local_txn[1].input.len(), 1);
2463         assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2464         assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
2465         // Revoke the old state
2466         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
2467
2468         {
2469                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2470                 {
2471                         let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
2472                         assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2473                         assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output
2474                         check_spends!(node_txn[0], revoked_local_txn[0]);
2475                         node_txn.swap_remove(0);
2476                 }
2477                 check_added_monitors!(nodes[1], 1);
2478                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2479                 test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
2480
2481                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2482                 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
2483                 // Verify broadcast of revoked HTLC-timeout
2484                 let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
2485                 check_added_monitors!(nodes[0], 1);
2486                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2487                 // Broadcast revoked HTLC-timeout on node 1
2488                 mine_transaction(&nodes[1], &node_txn[1]);
2489                 test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
2490         }
2491         get_announce_close_broadcast_events(&nodes, 0, 1);
2492         assert_eq!(nodes[0].node.list_channels().len(), 0);
2493         assert_eq!(nodes[1].node.list_channels().len(), 0);
2494 }
2495
2496 #[test]
2497 fn test_justice_tx_htlc_success() {
2498         // Test justice txn built on revoked HTLC-Success tx, against both sides
2499         let mut alice_config = test_default_channel_config();
2500         alice_config.channel_handshake_config.announced_channel = true;
2501         alice_config.channel_handshake_limits.force_announced_channel_preference = false;
2502         alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
2503         let mut bob_config = test_default_channel_config();
2504         bob_config.channel_handshake_config.announced_channel = true;
2505         bob_config.channel_handshake_limits.force_announced_channel_preference = false;
2506         bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
2507         let user_cfgs = [Some(alice_config), Some(bob_config)];
2508         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2509         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2510         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
2511         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2512         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
2513         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2514         // Create some new channels:
2515         let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1);
2516
2517         // A pending HTLC which will be revoked:
2518         let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2519         // Get the will-be-revoked local txn from B
2520         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_6.2);
2521         assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx
2522         assert_eq!(revoked_local_txn[0].input.len(), 1);
2523         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.txid());
2524         assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to A are present
2525         // Revoke the old state
2526         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4);
2527         {
2528                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2529                 {
2530                         let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
2531                         assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2532                         assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output
2533
2534                         check_spends!(node_txn[0], revoked_local_txn[0]);
2535                         node_txn.swap_remove(0);
2536                 }
2537                 check_added_monitors!(nodes[0], 1);
2538                 test_txn_broadcast(&nodes[0], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
2539
2540                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2541                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2542                 let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
2543                 check_added_monitors!(nodes[1], 1);
2544                 mine_transaction(&nodes[0], &node_txn[1]);
2545                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2546                 test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
2547         }
2548         get_announce_close_broadcast_events(&nodes, 0, 1);
2549         assert_eq!(nodes[0].node.list_channels().len(), 0);
2550         assert_eq!(nodes[1].node.list_channels().len(), 0);
2551 }
2552
2553 #[test]
2554 fn revoked_output_claim() {
2555         // Simple test to ensure a node will claim a revoked output when a stale remote commitment
2556         // transaction is broadcast by its counterparty
2557         let chanmon_cfgs = create_chanmon_cfgs(2);
2558         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2559         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2560         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2561         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2562         // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim the revoked output
2563         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2564         assert_eq!(revoked_local_txn.len(), 1);
2565         // Only output is the full channel value back to nodes[0]:
2566         assert_eq!(revoked_local_txn[0].output.len(), 1);
2567         // Send a payment through, updating everyone's latest commitment txn
2568         send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);
2569
2570         // Inform nodes[1] that nodes[0] broadcast a stale tx
2571         mine_transaction(&nodes[1], &revoked_local_txn[0]);
2572         check_added_monitors!(nodes[1], 1);
2573         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2574         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
2575         assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output
2576
2577         check_spends!(node_txn[0], revoked_local_txn[0]);
2578
2579         // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
2580         mine_transaction(&nodes[0], &revoked_local_txn[0]);
2581         get_announce_close_broadcast_events(&nodes, 0, 1);
2582         check_added_monitors!(nodes[0], 1);
2583         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2584 }
2585
2586 #[test]
2587 fn test_forming_justice_tx_from_monitor_updates() {
2588         do_test_forming_justice_tx_from_monitor_updates(true);
2589         do_test_forming_justice_tx_from_monitor_updates(false);
2590 }
2591
2592 fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment: bool) {
2593         // Simple test to make sure that the justice tx built by the WatchtowerPersister
2594         // is properly formed and can be broadcast/confirmed successfully in the event
2595         // that a revoked commitment transaction is broadcast
2596         // (Similar to `revoked_output_claim` test but we get the justice tx + broadcast manually)
2597         let chanmon_cfgs = create_chanmon_cfgs(2);
2598         let destination_script0 = chanmon_cfgs[0].keys_manager.get_destination_script([0; 32]).unwrap();
2599         let destination_script1 = chanmon_cfgs[1].keys_manager.get_destination_script([0; 32]).unwrap();
2600         let persisters = vec![WatchtowerPersister::new(destination_script0),
2601                 WatchtowerPersister::new(destination_script1)];
2602         let node_cfgs = create_node_cfgs_with_persisters(2, &chanmon_cfgs, persisters.iter().collect());
2603         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2604         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2605         let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
2606         let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
2607
2608         if !broadcast_initial_commitment {
2609                 // Send a payment to move the channel forward
2610                 send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000);
2611         }
2612
2613         // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim the revoked output.
2614         // We'll keep this commitment transaction to broadcast once it's revoked.
2615         let revoked_local_txn = get_local_commitment_txn!(nodes[0], channel_id);
2616         assert_eq!(revoked_local_txn.len(), 1);
2617         let revoked_commitment_tx = &revoked_local_txn[0];
2618
2619         // Send another payment, now revoking the previous commitment tx
2620         send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000);
2621
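        // The WatchtowerPersister should have been able to assemble a justice tx for the
        // now-revoked commitment purely from the monitor updates it observed, before that
        // commitment ever appears on chain.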
2622         let justice_tx = persisters[1].justice_tx(funding_txo, &revoked_commitment_tx.txid()).unwrap();
2623         check_spends!(justice_tx, revoked_commitment_tx);
2624
2625         mine_transactions(&nodes[1], &[revoked_commitment_tx, &justice_tx]);
2626         mine_transactions(&nodes[0], &[revoked_commitment_tx, &justice_tx]);
2627
2628         check_added_monitors!(nodes[1], 1);
2629         check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
2630                 &[nodes[0].node.get_our_node_id()], 100_000);
2631         get_announce_close_broadcast_events(&nodes, 1, 0);
2632
2633         check_added_monitors!(nodes[0], 1);
2634         check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false,
2635                 &[nodes[1].node.get_our_node_id()], 100_000);
2636
2637         // Check that the justice tx has sent the revoked output value to nodes[1]
2638         let monitor = get_monitor!(nodes[1], channel_id);
2639         let total_claimable_balance = monitor.get_claimable_balances().iter().fold(0, |sum, balance| {
2640                 match balance {
2641                         channelmonitor::Balance::ClaimableAwaitingConfirmations { amount_satoshis, .. } => sum + amount_satoshis,
2642                         _ => panic!("Unexpected balance type"),
2643                 }
2644         });
2645         // On the first commitment, nodes[1]'s balance was below dust so it didn't have an output
2646         let node1_channel_balance = if broadcast_initial_commitment { 0 } else { revoked_commitment_tx.output[0].value };
2647         let expected_claimable_balance = node1_channel_balance + justice_tx.output[0].value;
2648         assert_eq!(total_claimable_balance, expected_claimable_balance);
2649 }
2650
2651
2652 #[test]
2653 fn claim_htlc_outputs_shared_tx() {
2654         // A node revoked its old state; the HTLCs haven't timed out yet, so claim them in a shared justice tx
2655         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2656         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2657         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2658         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2659         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2660
2661         // Create some new channel:
2662         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2663
2664         // Rebalance the network to generate an HTLC in each direction
2665         send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
2666         // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx
2667         let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
2668         let (_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
2669
2670         // Get the will-be-revoked local txn from node[0]
2671         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2672         assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
2673         assert_eq!(revoked_local_txn[0].input.len(), 1);
2674         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
2675         assert_eq!(revoked_local_txn[1].input.len(), 1);
2676         assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2677         assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
2678         check_spends!(revoked_local_txn[1], revoked_local_txn[0]);
2679
2680         // Revoke the old state
2681         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
2682
2683         {
2684                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2685                 check_added_monitors!(nodes[0], 1);
2686                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2687                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2688                 check_added_monitors!(nodes[1], 1);
2689                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2690                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2691                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
2692
2693                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
2694                 assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2695
2696                 assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
2697                 check_spends!(node_txn[0], revoked_local_txn[0]);
2698
2699                 let mut witness_lens = BTreeSet::new();
2700                 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
2701                 witness_lens.insert(node_txn[0].input[1].witness.last().unwrap().len());
2702                 witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len());
2703                 assert_eq!(witness_lens.len(), 3);
2704                 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2705                 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2706                 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2707
2708                 // Finally, mine the penalty transaction and check that we get an HTLC failure after
2709                 // ANTI_REORG_DELAY confirmations.
2710                 mine_transaction(&nodes[1], &node_txn[0]);
2711                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2712                 expect_payment_failed!(nodes[1], payment_hash_2, false);
2713         }
2714         get_announce_close_broadcast_events(&nodes, 0, 1);
2715         assert_eq!(nodes[0].node.list_channels().len(), 0);
2716         assert_eq!(nodes[1].node.list_channels().len(), 0);
2717 }
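
// The revoked-output tests above and below distinguish the three claim types purely by
// witness script length. A minimal sketch of that classification as a hypothetical helper
// (77 and the two script-weight constants are the same values the assertions rely on):
#[allow(dead_code)]
fn classify_revoked_claim_input(input: &TxIn) -> &'static str {
        match input.witness.last().map(|witness_script| witness_script.len()) {
                // A revoked to_local claim reveals the 77-byte revokeable script.
                Some(77) => "revoked to_local",
                Some(len) if len == OFFERED_HTLC_SCRIPT_WEIGHT => "revoked offered HTLC",
                Some(len) if len == ACCEPTED_HTLC_SCRIPT_WEIGHT => "revoked received HTLC",
                _ => "unknown",
        }
}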
2718
2719 #[test]
2720 fn claim_htlc_outputs_single_tx() {
2721         // Node revoked its old state; the HTLCs have timed out, so claim each of them in a separate justice tx
2722         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2723         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2724         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2725         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2726         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2727
2728         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2729
2730         // Rebalance the network to generate HTLCs in both directions
2731         send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
2732         // node[0] is going to revoke an old state, thus node[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx, but this
2733         // time as two separate claim transactions, as we're going to time out the HTLCs given a high current height
2734         let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
2735         let (_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
2736
2737         // Get the will-be-revoked local txn from node[0]
2738         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2739
2740         // Revoke the old state
2741         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
2742
2743         {
2744                 confirm_transaction_at(&nodes[0], &revoked_local_txn[0], 100);
2745                 check_added_monitors!(nodes[0], 1);
2746                 confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
2747                 check_added_monitors!(nodes[1], 1);
2748                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2749                 let mut events = nodes[0].node.get_and_clear_pending_events();
2750                 expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
2751                 match events.last().unwrap() {
2752                         Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
2753                         _ => panic!("Unexpected event"),
2754                 }
2755
2756                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2757                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
2758
2759                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcast();
2760
2761                 // Check the pair of local commitment and HTLC-timeout transactions broadcast due to HTLC expiration
2762                 assert_eq!(node_txn[0].input.len(), 1);
2763                 check_spends!(node_txn[0], chan_1.3);
2764                 assert_eq!(node_txn[1].input.len(), 1);
2765                 let witness_script = node_txn[1].input[0].witness.last().unwrap();
2766                 assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); // Spending an offered htlc output
2767                 check_spends!(node_txn[1], node_txn[0]);
2768
2769                 // Filter out any non-justice transactions.
2770                 node_txn.retain(|tx| tx.input[0].previous_output.txid == revoked_local_txn[0].txid());
2771                 assert!(node_txn.len() > 3);
2772
2773                 assert_eq!(node_txn[0].input.len(), 1);
2774                 assert_eq!(node_txn[1].input.len(), 1);
2775                 assert_eq!(node_txn[2].input.len(), 1);
2776
2777                 check_spends!(node_txn[0], revoked_local_txn[0]);
2778                 check_spends!(node_txn[1], revoked_local_txn[0]);
2779                 check_spends!(node_txn[2], revoked_local_txn[0]);
2780
2781                 let mut witness_lens = BTreeSet::new();
2782                 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
2783                 witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len());
2784                 witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
2785                 assert_eq!(witness_lens.len(), 3);
2786                 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2787                 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2788                 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2789
2790                 // Finally, mine the penalty transactions and check that we get an HTLC failure after
2791                 // ANTI_REORG_DELAY confirmations.
2792                 mine_transaction(&nodes[1], &node_txn[0]);
2793                 mine_transaction(&nodes[1], &node_txn[1]);
2794                 mine_transaction(&nodes[1], &node_txn[2]);
2795                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2796                 expect_payment_failed!(nodes[1], payment_hash_2, false);
2797         }
2798         get_announce_close_broadcast_events(&nodes, 0, 1);
2799         assert_eq!(nodes[0].node.list_channels().len(), 0);
2800         assert_eq!(nodes[1].node.list_channels().len(), 0);
2801 }
2802
2803 #[test]
2804 fn test_htlc_on_chain_success() {
2805         // Test that in case of a unilateral close onchain, we detect the state of the output and
2806         // pass the preimage backward accordingly. So here we test that ChannelManager is
2807         // broadcasting the right event to the other nodes in the payment path.
2808         // We test with two HTLCs simultaneously as that was not handled correctly in the past.
2809         // A --------------------> B ----------------------> C (preimage)
2810         // First, C should claim the HTLC outputs via HTLC-Success when its own latest local
2811         // commitment transaction was broadcast.
2812         // Then, B should learn the preimage from said transactions and attempt to claim the
2813         // HTLC backwards, pulling the funds towards itself.
2814         // B should be able to claim via preimage if A then broadcasts its local tx.
2815         // Finally, when A sees B's latest local commitment transaction it should be able to claim
2816         // the HTLC outputs via the preimage it learned (which, once confirmed should generate a
2817         // PaymentSent event).
2818
2819         let chanmon_cfgs = create_chanmon_cfgs(3);
2820         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2821         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2822         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2823
2824         // Create some initial channels
2825         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2826         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
2827
2828         // Ensure all nodes are at the same height
2829         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
2830         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
2831         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
2832         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
2833
2834         // Rebalance the network a bit by relaying one payment through all the channels...
2835         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8_000_000);
2836         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8_000_000);
2837
2838         let (our_payment_preimage, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
2839         let (our_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
2840
2841         // Broadcast legit commitment tx from C on B's chain
2842         // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain
2843         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
2844         assert_eq!(commitment_tx.len(), 1);
2845         check_spends!(commitment_tx[0], chan_2.3);
2846         nodes[2].node.claim_funds(our_payment_preimage);
2847         expect_payment_claimed!(nodes[2], payment_hash_1, 3_000_000);
2848         nodes[2].node.claim_funds(our_payment_preimage_2);
2849         expect_payment_claimed!(nodes[2], payment_hash_2, 3_000_000);
2850         check_added_monitors!(nodes[2], 2);
2851         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2852         assert!(updates.update_add_htlcs.is_empty());
2853         assert!(updates.update_fail_htlcs.is_empty());
2854         assert!(updates.update_fail_malformed_htlcs.is_empty());
2855         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2856
2857         mine_transaction(&nodes[2], &commitment_tx[0]);
2858         check_closed_broadcast!(nodes[2], true);
2859         check_added_monitors!(nodes[2], 1);
2860         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2861         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 2 (2 * HTLC-Success tx)
2862         assert_eq!(node_txn.len(), 2);
2863         check_spends!(node_txn[0], commitment_tx[0]);
2864         check_spends!(node_txn[1], commitment_tx[0]);
2865         assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2866         assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2867         assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2868         assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2869         assert_eq!(node_txn[0].lock_time, LockTime::ZERO);
2870         assert_eq!(node_txn[1].lock_time, LockTime::ZERO);
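        // Note: per BOLT 3, HTLC-Success transactions are the ones with locktime 0, while
        // HTLC-Timeout transactions set their locktime to the HTLC's cltv_expiry (hence the
        // non-zero locktime checks in check_tx_local_broadcast! below).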
2871
2872         // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
2873         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()]));
2874         connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
2875         {
2876                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2877                 assert_eq!(added_monitors.len(), 1);
2878                 assert_eq!(added_monitors[0].0.txid, chan_2.3.txid());
2879                 added_monitors.clear();
2880         }
2881         let forwarded_events = nodes[1].node.get_and_clear_pending_events();
2882         assert_eq!(forwarded_events.len(), 3);
2883         match forwarded_events[0] {
2884                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
2885                 _ => panic!("Unexpected event"),
2886         }
2887         let chan_id = Some(chan_1.2);
2888         match forwarded_events[1] {
2889                 Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
2890                         next_channel_id, outbound_amount_forwarded_msat, ..
2891                 } => {
2892                         assert_eq!(total_fee_earned_msat, Some(1000));
2893                         assert_eq!(prev_channel_id, chan_id);
2894                         assert_eq!(claim_from_onchain_tx, true);
2895                         assert_eq!(next_channel_id, Some(chan_2.2));
2896                         assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
2897                 },
2898                 _ => panic!()
2899         }
2900         match forwarded_events[2] {
2901                 Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
2902                         next_channel_id, outbound_amount_forwarded_msat, ..
2903                 } => {
2904                         assert_eq!(total_fee_earned_msat, Some(1000));
2905                         assert_eq!(prev_channel_id, chan_id);
2906                         assert_eq!(claim_from_onchain_tx, true);
2907                         assert_eq!(next_channel_id, Some(chan_2.2));
2908                         assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
2909                 },
2910                 _ => panic!()
2911         }
2912         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
2913         {
2914                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2915                 assert_eq!(added_monitors.len(), 2);
2916                 assert_eq!(added_monitors[0].0.txid, chan_1.3.txid());
2917                 assert_eq!(added_monitors[1].0.txid, chan_1.3.txid());
2918                 added_monitors.clear();
2919         }
2920         assert_eq!(events.len(), 3);
2921
2922         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
2923         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
2924
2925         match nodes_2_event {
2926                 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {},
2927                 _ => panic!("Unexpected event"),
2928         }
2929
2930         match nodes_0_event {
2931                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
2932                         assert!(update_add_htlcs.is_empty());
2933                         assert!(update_fail_htlcs.is_empty());
2934                         assert_eq!(update_fulfill_htlcs.len(), 1);
2935                         assert!(update_fail_malformed_htlcs.is_empty());
2936                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
2937                 },
2938                 _ => panic!("Unexpected event"),
2939         };
2940
2941         // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
2942         match events[0] {
2943                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
2944                 _ => panic!("Unexpected event"),
2945         }
2946
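        // Helper to verify a node's two HTLC-timeout claims on the given commitment tx:
        // second-stage HTLC-Timeout transactions (offered-HTLC script, revokeable P2WSH
        // output) when $htlc_offered is set, otherwise direct claims on the counterparty's
        // commitment (accepted-HTLC script, P2WPKH output).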
2947         macro_rules! check_tx_local_broadcast {
2948                 ($node: expr, $htlc_offered: expr, $commitment_tx: expr) => { {
2949                         let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap();
2950                         assert_eq!(node_txn.len(), 2);
2951                         // Node[1]: 2 * HTLC-timeout tx
2952                         // Node[0]: 2 * HTLC-timeout tx
2953                         check_spends!(node_txn[0], $commitment_tx);
2954                         check_spends!(node_txn[1], $commitment_tx);
2955                         assert_ne!(node_txn[0].lock_time, LockTime::ZERO);
2956                         assert_ne!(node_txn[1].lock_time, LockTime::ZERO);
2957                         if $htlc_offered {
2958                                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2959                                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2960                                 assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2961                                 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2962                         } else {
2963                                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2964                                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2965                                 assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
2966                                 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
2967                         }
2968                         node_txn.clear();
2969                 } }
2970         }
2971         // nodes[1] now broadcasts its own timeout-claim of the output that nodes[2] just claimed via success.
2972         check_tx_local_broadcast!(nodes[1], false, commitment_tx[0]);
2973
2974         // Broadcast legit commitment tx from A on B's chain
2975         // Broadcast preimage tx by B on offered output from A's commitment tx on A's chain
2976         let node_a_commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
2977         check_spends!(node_a_commitment_tx[0], chan_1.3);
2978         mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
2979         check_closed_broadcast!(nodes[1], true);
2980         check_added_monitors!(nodes[1], 1);
2981         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2982         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
2983         assert!(node_txn.len() == 1 || node_txn.len() == 3); // HTLC-Success, 2 * RBF bumps of above HTLC txn
2984         let commitment_spend =
2985                 if node_txn.len() == 1 {
2986                         &node_txn[0]
2987                 } else {
2988                         // Certain `ConnectStyle`s (e.g. FullBlockViaListen) will cause RBF bumps of the
2989                         // previous HTLC transaction to be broadcast.
2990                         if node_txn[0].input[0].previous_output.txid == node_a_commitment_tx[0].txid() {
2991                                 check_spends!(node_txn[1], commitment_tx[0]);
2992                                 check_spends!(node_txn[2], commitment_tx[0]);
2993                                 assert_ne!(node_txn[1].input[0].previous_output.vout, node_txn[2].input[0].previous_output.vout);
2994                                 &node_txn[0]
2995                         } else {
2996                                 check_spends!(node_txn[0], commitment_tx[0]);
2997                                 check_spends!(node_txn[1], commitment_tx[0]);
2998                                 assert_ne!(node_txn[0].input[0].previous_output.vout, node_txn[1].input[0].previous_output.vout);
2999                                 &node_txn[2]
3000                         }
3001                 };
3002
3003         check_spends!(commitment_spend, node_a_commitment_tx[0]);
3004         assert_eq!(commitment_spend.input.len(), 2);
3005         assert_eq!(commitment_spend.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
3006         assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
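        // A preimage claim on the counterparty's commitment has no CLTV wait; the claim's
        // locktime is simply set to the current height (an anti-fee-sniping measure) and it
        // pays B directly via P2WPKH, as the two assertions below check.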
3007         assert_eq!(commitment_spend.lock_time.to_consensus_u32(), nodes[1].best_block_info().1);
3008         assert!(commitment_spend.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
3009         // We don't bother to check that B can claim the HTLC output on its commitment tx here as
3010         // we already checked the same situation with A.
3011
3012         // Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
3013         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()]));
3014         connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
3015         check_closed_broadcast!(nodes[0], true);
3016         check_added_monitors!(nodes[0], 1);
3017         let events = nodes[0].node.get_and_clear_pending_events();
3018         assert_eq!(events.len(), 5);
3019         let mut first_claimed = false;
3020         for event in events {
3021                 match event {
3022                         Event::PaymentSent { payment_preimage, payment_hash, .. } => {
3023                                 if payment_preimage == our_payment_preimage && payment_hash == payment_hash_1 {
3024                                         assert!(!first_claimed);
3025                                         first_claimed = true;
3026                                 } else {
3027                                         assert_eq!(payment_preimage, our_payment_preimage_2);
3028                                         assert_eq!(payment_hash, payment_hash_2);
3029                                 }
3030                         },
3031                         Event::PaymentPathSuccessful { .. } => {},
3032                         Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
3033                         _ => panic!("Unexpected event"),
3034                 }
3035         }
3036         check_tx_local_broadcast!(nodes[0], true, node_a_commitment_tx[0]);
3037 }
3038
3039 fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
3040         // Test that in case of a unilateral close onchain, we detect the state of the output and
3041         // time out the HTLC backward accordingly. So here we test that ChannelManager is
3042         // broadcasting the right event to the other nodes in the payment path.
3043         // A ------------------> B ----------------------> C (timeout)
3044         //    B's commitment tx                 C's commitment tx
3045         //            \                                  \
3046         //         B's HTLC timeout tx               B's timeout tx
3047
3048         let chanmon_cfgs = create_chanmon_cfgs(3);
3049         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3050         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3051         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3052         *nodes[0].connect_style.borrow_mut() = connect_style;
3053         *nodes[1].connect_style.borrow_mut() = connect_style;
3054         *nodes[2].connect_style.borrow_mut() = connect_style;
3055
3056         // Create some initial channels
3057         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
3058         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3059
3060         // Rebalance the network a bit by relaying one payment through all the channels...
3061         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8_000_000);
3062         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8_000_000);
3063
3064         let (_payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
3065
3066         // Broadcast legit commitment tx from C on B's chain
3067         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
3068         check_spends!(commitment_tx[0], chan_2.3);
3069         nodes[2].node.fail_htlc_backwards(&payment_hash);
3070         check_added_monitors!(nodes[2], 0);
3071         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
3072         check_added_monitors!(nodes[2], 1);
3073
3074         let events = nodes[2].node.get_and_clear_pending_msg_events();
3075         assert_eq!(events.len(), 1);
3076         match events[0] {
3077                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3078                         assert!(update_add_htlcs.is_empty());
3079                         assert!(!update_fail_htlcs.is_empty());
3080                         assert!(update_fulfill_htlcs.is_empty());
3081                         assert!(update_fail_malformed_htlcs.is_empty());
3082                         assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
3083                 },
3084                 _ => panic!("Unexpected event"),
3085         };
3086         mine_transaction(&nodes[2], &commitment_tx[0]);
3087         check_closed_broadcast!(nodes[2], true);
3088         check_added_monitors!(nodes[2], 1);
3089         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
3090         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
3091         assert_eq!(node_txn.len(), 0);
3092
3093         // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
3094         // Verify that B's ChannelManager is able to detect that the HTLC has timed out via its own tx and fail it backward accordingly
3095         mine_transaction(&nodes[1], &commitment_tx[0]);
3096         check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
3097                 [nodes[2].node.get_our_node_id()], 100000);
3098         connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
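        // With the HTLC expired at this height, nodes[1] should have broadcast its timeout
        // claim (plus fee bumps, depending on the connect style).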
3099         let timeout_tx = {
3100                 let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
3101                 if nodes[1].connect_style.borrow().skips_blocks() {
3102                         assert_eq!(txn.len(), 1);
3103                 } else {
3104                         assert_eq!(txn.len(), 3); // Two extra fee bumps for timeout transaction
3105                 }
3106                 txn.iter().for_each(|tx| check_spends!(tx, commitment_tx[0]));
3107                 assert_eq!(txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3108                 txn.remove(0)
3109         };
3110
3111         mine_transaction(&nodes[1], &timeout_tx);
3112         check_added_monitors!(nodes[1], 1);
3113         check_closed_broadcast!(nodes[1], true);
3114
3115         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3116
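        // With ANTI_REORG_DELAY confirmations on the timeout claim, nodes[1] treats the HTLC
        // as irrevocably resolved on-chain and fails it back to nodes[0].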
3117         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
3118         check_added_monitors!(nodes[1], 1);
3119         let events = nodes[1].node.get_and_clear_pending_msg_events();
3120         assert_eq!(events.len(), 1);
3121         match events[0] {
3122                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3123                         assert!(update_add_htlcs.is_empty());
3124                         assert!(!update_fail_htlcs.is_empty());
3125                         assert!(update_fulfill_htlcs.is_empty());
3126                         assert!(update_fail_malformed_htlcs.is_empty());
3127                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3128                 },
3129                 _ => panic!("Unexpected event"),
3130         };
3131
3132         // Broadcast legit commitment tx from B on A's chain
3133         let commitment_tx = get_local_commitment_txn!(nodes[1], chan_1.2);
3134         check_spends!(commitment_tx[0], chan_1.3);
3135
3136         mine_transaction(&nodes[0], &commitment_tx[0]);
3137         connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
3138
3139         check_closed_broadcast!(nodes[0], true);
3140         check_added_monitors!(nodes[0], 1);
3141         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
3142         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx
3143         assert_eq!(node_txn.len(), 1);
3144         check_spends!(node_txn[0], commitment_tx[0]);
3145         assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3146 }
3147
3148 #[test]
3149 fn test_htlc_on_chain_timeout() {
3150         do_test_htlc_on_chain_timeout(ConnectStyle::BestBlockFirstSkippingBlocks);
3151         do_test_htlc_on_chain_timeout(ConnectStyle::TransactionsFirstSkippingBlocks);
3152         do_test_htlc_on_chain_timeout(ConnectStyle::FullBlockViaListen);
3153 }
3154
3155 #[test]
3156 fn test_simple_commitment_revoked_fail_backward() {
3157         // Test that in case of a revoked commitment tx, we detect the resolution of the output by
3158         // the justice tx and fail backward accordingly.
3159
3160         let chanmon_cfgs = create_chanmon_cfgs(3);
3161         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3162         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3163         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3164
3165         // Create some initial channels
3166         create_announced_chan_between_nodes(&nodes, 0, 1);
3167         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3168
3169         let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3170         // Get the will-be-revoked local txn from nodes[2]
3171         let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3172         // Revoke the old state
3173         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3174
3175         let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3176
3177         mine_transaction(&nodes[1], &revoked_local_txn[0]);
3178         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
3179         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3180         check_added_monitors!(nodes[1], 1);
3181         check_closed_broadcast!(nodes[1], true);
3182
3183         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
3184         check_added_monitors!(nodes[1], 1);
3185         let events = nodes[1].node.get_and_clear_pending_msg_events();
3186         assert_eq!(events.len(), 1);
3187         match events[0] {
3188                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3189                         assert!(update_add_htlcs.is_empty());
3190                         assert_eq!(update_fail_htlcs.len(), 1);
3191                         assert!(update_fulfill_htlcs.is_empty());
3192                         assert!(update_fail_malformed_htlcs.is_empty());
3193                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3194
3195                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3196                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3197                         expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_2.0.contents.short_channel_id, true);
3198                 },
3199                 _ => panic!("Unexpected event"),
3200         }
3201 }
3202
3203 fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use_dust: bool, no_to_remote: bool) {
3204         // Test that if our counterparty broadcasts a revoked commitment transaction we fail all
3205         // pending HTLCs on that channel backwards even if the HTLCs aren't present in our latest
3206         // commitment transaction anymore.
3207         // To do this, we have the peer which will broadcast a revoked commitment transaction send
3208         // a number of update_fail/commitment_signed updates without ever sending the RAA in
3209         // response to our commitment_signed. This is somewhat misbehavior-y, though not
3210         // technically disallowed and we should probably handle it reasonably.
3211         // Note that this is pretty exhaustive as an outbound HTLC which we haven't yet
3212         // failed/fulfilled backwards must be in at least one of the latest two remote commitment
3213         // transactions:
3214         // * Once we move it out of our holding cell/add it, we will immediately include it in a
3215         //   commitment_signed (implying it will be in the latest remote commitment transaction).
3216         // * Once they remove it, we will send a (the first) commitment_signed without the HTLC,
3217         //   and once they revoke the previous commitment transaction (allowing us to send a new
3218         //   commitment_signed) we will be free to fail/fulfill the HTLC backwards.
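        // The plan below: nodes[2] fails three HTLCs back, with its RAA for the first fail
        // withheld (delivered later only if deliver_bs_raa), so that first HTLC drops out of
        // nodes[1]'s latest two commitment transactions; a fourth HTLC is then parked in
        // nodes[1]'s holding cell before the revoked commitment confirms.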
3219         let chanmon_cfgs = create_chanmon_cfgs(3);
3220         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3221         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3222         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3223
3224         // Create some initial channels
3225         create_announced_chan_between_nodes(&nodes, 0, 1);
3226         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3227
3228         let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
3229         // Get the will-be-revoked local txn from nodes[2]
3230         let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3231         assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 });
3232         // Revoke the old state
3233         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3234
3235         let value = if use_dust {
3236                 // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
3237                 // well, so HTLCs at exactly the dust limit will not be included in commitment txn.
3238                 nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id())
3239                         .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context().holder_dust_limit_satoshis * 1000
3240         } else { 3000000 };
3241
3242         let (_, first_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3243         let (_, second_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3244         let (_, third_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3245
3246         nodes[2].node.fail_htlc_backwards(&first_payment_hash);
3247         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
3248         check_added_monitors!(nodes[2], 1);
3249         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3250         assert!(updates.update_add_htlcs.is_empty());
3251         assert!(updates.update_fulfill_htlcs.is_empty());
3252         assert!(updates.update_fail_malformed_htlcs.is_empty());
3253         assert_eq!(updates.update_fail_htlcs.len(), 1);
3254         assert!(updates.update_fee.is_none());
3255         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3256         let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
3257         // Drop the last RAA from nodes[2] -> nodes[1]
3258
3259         nodes[2].node.fail_htlc_backwards(&second_payment_hash);
3260         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: second_payment_hash }]);
3261         check_added_monitors!(nodes[2], 1);
3262         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3263         assert!(updates.update_add_htlcs.is_empty());
3264         assert!(updates.update_fulfill_htlcs.is_empty());
3265         assert!(updates.update_fail_malformed_htlcs.is_empty());
3266         assert_eq!(updates.update_fail_htlcs.len(), 1);
3267         assert!(updates.update_fee.is_none());
3268         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3269         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3270         check_added_monitors!(nodes[1], 1);
3271         // Note that nodes[1] is in AwaitingRAA, so won't send a CS
3272         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3273         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3274         check_added_monitors!(nodes[2], 1);
3275
3276         nodes[2].node.fail_htlc_backwards(&third_payment_hash);
3277         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: third_payment_hash }]);
3278         check_added_monitors!(nodes[2], 1);
3279         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3280         assert!(updates.update_add_htlcs.is_empty());
3281         assert!(updates.update_fulfill_htlcs.is_empty());
3282         assert!(updates.update_fail_malformed_htlcs.is_empty());
3283         assert_eq!(updates.update_fail_htlcs.len(), 1);
3284         assert!(updates.update_fee.is_none());
3285         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3286         // At this point first_payment_hash has dropped out of the latest two commitment
3287         // transactions that nodes[1] is tracking...
3288         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3289         check_added_monitors!(nodes[1], 1);
3290         // Note that nodes[1] is (still) in AwaitingRAA, so won't send a CS
3291         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3292         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3293         check_added_monitors!(nodes[2], 1);
3294
3295         // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting
3296         // on nodes[2]'s RAA.
3297         let (route, fourth_payment_hash, _, fourth_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 1000000);
3298         nodes[1].node.send_payment_with_route(&route, fourth_payment_hash,
3299                 RecipientOnionFields::secret_only(fourth_payment_secret), PaymentId(fourth_payment_hash.0)).unwrap();
3300         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3301         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3302         check_added_monitors!(nodes[1], 0);
3303
3304         if deliver_bs_raa {
3305                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_raa);
3306                 // One monitor for the new revocation preimage, no second one as we won't generate a new
3307                 // commitment transaction for nodes[0] until process_pending_htlc_forwards().
3308                 check_added_monitors!(nodes[1], 1);
3309                 let events = nodes[1].node.get_and_clear_pending_events();
3310                 assert_eq!(events.len(), 2);
3311                 match events[0] {
3312                         Event::HTLCHandlingFailed { .. } => { },
3313                         _ => panic!("Unexpected event"),
3314                 }
3315                 match events[1] {
3316                         Event::PendingHTLCsForwardable { .. } => { },
3317                         _ => panic!("Unexpected event"),
3318                 };
3319                 // Deliberately don't process the pending fail-back so they all fail back at once after
3320                 // block connection just like the !deliver_bs_raa case
3321         }
3322
3323         let mut failed_htlcs = new_hash_set();
3324         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3325
3326         mine_transaction(&nodes[1], &revoked_local_txn[0]);
3327         check_added_monitors!(nodes[1], 1);
3328         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3329
3330         let events = nodes[1].node.get_and_clear_pending_events();
3331         assert_eq!(events.len(), if deliver_bs_raa { 3 + nodes.len() - 1 } else { 4 + nodes.len() });
3332         assert!(events.iter().any(|ev| matches!(
3333                 ev,
3334                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. }
3335         )));
3336         assert!(events.iter().any(|ev| matches!(
3337                 ev,
3338                 Event::PaymentPathFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash
3339         )));
3340         assert!(events.iter().any(|ev| matches!(
3341                 ev,
3342                 Event::PaymentFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash
3343         )));
3344
3345         nodes[1].node.process_pending_htlc_forwards();
3346         check_added_monitors!(nodes[1], 1);
3347
3348         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
3349         assert_eq!(events.len(), if deliver_bs_raa { 4 } else { 3 });
3350
3351         if deliver_bs_raa {
3352                 let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
3353                 match nodes_2_event {
3354                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3355                                 assert_eq!(nodes[2].node.get_our_node_id(), *node_id);
3356                                 assert_eq!(update_add_htlcs.len(), 1);
3357                                 assert!(update_fulfill_htlcs.is_empty());
3358                                 assert!(update_fail_htlcs.is_empty());
3359                                 assert!(update_fail_malformed_htlcs.is_empty());
3360                         },
3361                         _ => panic!("Unexpected event"),
3362                 }
3363         }
3364
3365         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
3366         match nodes_2_event {
3367                 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { msg: Some(msgs::ErrorMessage { channel_id, ref data }) }, node_id: _ } => {
3368                         assert_eq!(channel_id, chan_2.2);
3369                         assert_eq!(data.as_str(), "Channel closed because commitment or closing transaction was confirmed on chain.");
3370                 },
3371                 _ => panic!("Unexpected event"),
3372         }
3373
3374         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
3375         match nodes_0_event {
3376                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3377                         assert!(update_add_htlcs.is_empty());
3378                         assert_eq!(update_fail_htlcs.len(), 3);
3379                         assert!(update_fulfill_htlcs.is_empty());
3380                         assert!(update_fail_malformed_htlcs.is_empty());
3381                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3382
3383                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3384                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[1]);
3385                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[2]);
3386
3387                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3388
3389                         let events = nodes[0].node.get_and_clear_pending_events();
3390                         assert_eq!(events.len(), 6);
3391                         match events[0] {
3392                                 Event::PaymentPathFailed { ref payment_hash, ref failure, .. } => {
3393                                         assert!(failed_htlcs.insert(payment_hash.0));
3394                                         // If we delivered B's RAA we got an unknown preimage error, not something
3395                                         // that we should update our routing table for.
3396                                         if !deliver_bs_raa {
3397                                                 if let PathFailure::OnPath { network_update: Some(_) } = failure { } else { panic!("Unexpected path failure") }
3398                                         }
3399                                 },
3400                                 _ => panic!("Unexpected event"),
3401                         }
3402                         match events[1] {
3403                                 Event::PaymentFailed { ref payment_hash, .. } => {
3404                                         assert_eq!(*payment_hash, first_payment_hash);
3405                                 },
3406                                 _ => panic!("Unexpected event"),
3407                         }
3408                         match events[2] {
3409                                 Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
3410                                         assert!(failed_htlcs.insert(payment_hash.0));
3411                                 },
3412                                 _ => panic!("Unexpected event"),
3413                         }
3414                         match events[3] {
3415                                 Event::PaymentFailed { ref payment_hash, .. } => {
3416                                         assert_eq!(*payment_hash, second_payment_hash);
3417                                 },
3418                                 _ => panic!("Unexpected event"),
3419                         }
3420                         match events[4] {
3421                                 Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
3422                                         assert!(failed_htlcs.insert(payment_hash.0));
3423                                 },
3424                                 _ => panic!("Unexpected event"),
3425                         }
3426                         match events[5] {
3427                                 Event::PaymentFailed { ref payment_hash, .. } => {
3428                                         assert_eq!(*payment_hash, third_payment_hash);
3429                                 },
3430                                 _ => panic!("Unexpected event"),
3431                         }
3432                 },
3433                 _ => panic!("Unexpected event"),
3434         }
3435
3436         // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
3437         match events[0] {
3438                 MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
3439                 _ => panic!("Unexpected event"),
3440         }
3441
3442         assert!(failed_htlcs.contains(&first_payment_hash.0));
3443         assert!(failed_htlcs.contains(&second_payment_hash.0));
3444         assert!(failed_htlcs.contains(&third_payment_hash.0));
3445 }
3446
3447 #[test]
3448 fn test_commitment_revoked_fail_backward_exhaustive_a() {
3449         do_test_commitment_revoked_fail_backward_exhaustive(false, true, false);
3450         do_test_commitment_revoked_fail_backward_exhaustive(true, true, false);
3451         do_test_commitment_revoked_fail_backward_exhaustive(false, false, false);
3452         do_test_commitment_revoked_fail_backward_exhaustive(true, false, false);
3453 }
3454
3455 #[test]
3456 fn test_commitment_revoked_fail_backward_exhaustive_b() {
3457         do_test_commitment_revoked_fail_backward_exhaustive(false, true, true);
3458         do_test_commitment_revoked_fail_backward_exhaustive(true, true, true);
3459         do_test_commitment_revoked_fail_backward_exhaustive(false, false, true);
3460         do_test_commitment_revoked_fail_backward_exhaustive(true, false, true);
3461 }
3462
3463 #[test]
3464 fn fail_backward_pending_htlc_upon_channel_failure() {
3465         let chanmon_cfgs = create_chanmon_cfgs(2);
3466         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3467         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3468         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3469         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
3470
3471         // Alice -> Bob: Route a payment but without Bob sending revoke_and_ack.
3472         {
3473                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000);
3474                 nodes[0].node.send_payment_with_route(&route, payment_hash, RecipientOnionFields::secret_only(payment_secret),
3475                         PaymentId(payment_hash.0)).unwrap();
3476                 check_added_monitors!(nodes[0], 1);
3477
3478                 let payment_event = {
3479                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3480                         assert_eq!(events.len(), 1);
3481                         SendEvent::from_event(events.remove(0))
3482                 };
3483                 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
3484                 assert_eq!(payment_event.msgs.len(), 1);
3485         }
3486
3487         // Alice -> Bob: Route another payment but now Alice waits for Bob's earlier revoke_and_ack.
3488         let (route, failed_payment_hash, _, failed_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000);
3489         {
3490                 nodes[0].node.send_payment_with_route(&route, failed_payment_hash,
3491                         RecipientOnionFields::secret_only(failed_payment_secret), PaymentId(failed_payment_hash.0)).unwrap();
3492                 check_added_monitors!(nodes[0], 0);
3493
3494                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3495         }
3496
3497         // Alice <- Bob: Send a malformed update_add_htlc so Alice fails the channel.
3498         {
3499                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 50_000);
3500
3501                 let secp_ctx = Secp256k1::new();
3502                 let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
3503                 let current_height = nodes[1].node.best_block.read().unwrap().height + 1;
3504                 let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(
3505                         &route.paths[0], 50_000, RecipientOnionFields::secret_only(payment_secret), current_height, &None).unwrap();
3506                 let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
3507                 let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
3508
3509                 // Send a 0-msat update_add_htlc to fail the channel.
3510                 let update_add_htlc = msgs::UpdateAddHTLC {
3511                         channel_id: chan.2,
3512                         htlc_id: 0,
3513                         amount_msat: 0,
3514                         payment_hash,
3515                         cltv_expiry,
3516                         onion_routing_packet,
3517                         skimmed_fee_msat: None,
3518                         blinding_point: None,
3519                 };
3520                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
3521         }
3522         let events = nodes[0].node.get_and_clear_pending_events();
3523         assert_eq!(events.len(), 3);
3524         // Check that Alice fails backward the pending HTLC from the second payment.
3525         match events[0] {
3526                 Event::PaymentPathFailed { payment_hash, .. } => {
3527                         assert_eq!(payment_hash, failed_payment_hash);
3528                 },
3529                 _ => panic!("Unexpected event"),
3530         }
3531         match events[1] {
3532                 Event::PaymentFailed { payment_hash, .. } => {
3533                         assert_eq!(payment_hash, failed_payment_hash);
3534                 },
3535                 _ => panic!("Unexpected event"),
3536         }
3537         match events[2] {
3538                 Event::ChannelClosed { reason: ClosureReason::ProcessingError { ref err }, .. } => {
3539                         assert_eq!(err, "Remote side tried to send a 0-msat HTLC");
3540                 },
3541                 _ => panic!("Unexpected event {:?}", events[2]),
3542         }
3543         check_closed_broadcast!(nodes[0], true);
3544         check_added_monitors!(nodes[0], 1);
3545 }
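// A minimal sketch (an editor's illustration, not exercised by the test above) of the receiver-side
// rule the test relies on: BOLT 2 forbids offering an HTLC with amount_msat == 0, so LDK treats a
// 0-msat inbound update_add_htlc as a protocol violation and force-closes the channel, as asserted
// above. The helper name is hypothetical.
#[allow(dead_code)]
fn update_add_htlc_amount_is_valid(msg: &msgs::UpdateAddHTLC) -> bool {
	// An offered HTLC must carry a strictly positive amount.
	msg.amount_msat > 0
}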
3546
3547 #[test]
3548 fn test_htlc_ignore_latest_remote_commitment() {
3549         // Test that HTLC transactions spending the latest remote commitment transaction are simply
3550         // ignored if we cannot claim them. This originally tickled an invalid unwrap().
3551         let chanmon_cfgs = create_chanmon_cfgs(2);
3552         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3553         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3554         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3555         if *nodes[1].connect_style.borrow() == ConnectStyle::FullBlockViaListen {
3556                 // We rely on the ability to connect a block redundantly, which isn't allowed via
3557                 // `chain::Listen`, so we never run the test if we randomly get assigned that
3558                 // connect_style.
3559                 return;
3560         }
3561         let funding_tx = create_announced_chan_between_nodes(&nodes, 0, 1).3;
3562
3563         route_payment(&nodes[0], &[&nodes[1]], 10000000);
3564         nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
3565         connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
3566         check_closed_broadcast!(nodes[0], true);
3567         check_added_monitors!(nodes[0], 1);
3568         check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
3569
3570         let node_txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
3571         assert_eq!(node_txn.len(), 2);
3572         check_spends!(node_txn[0], funding_tx);
3573         check_spends!(node_txn[1], node_txn[0]);
3574
3575         let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone()]);
3576         connect_block(&nodes[1], &block);
3577         check_closed_broadcast!(nodes[1], true);
3578         check_added_monitors!(nodes[1], 1);
3579         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
3580
3581         // Duplicate the connect_block call since this may happen due to other listeners
3582         // registering new transactions.
3583         connect_block(&nodes[1], &block);
3584 }
3585
3586 #[test]
3587 fn test_force_close_fail_back() {
3588         // Check which HTLCs are failed-backwards on channel force-closure
3589         let chanmon_cfgs = create_chanmon_cfgs(3);
3590         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3591         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3592         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3593         create_announced_chan_between_nodes(&nodes, 0, 1);
3594         create_announced_chan_between_nodes(&nodes, 1, 2);
3595
3596         let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
3597
3598         let mut payment_event = {
3599                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
3600                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
3601                 check_added_monitors!(nodes[0], 1);
3602
3603                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3604                 assert_eq!(events.len(), 1);
3605                 SendEvent::from_event(events.remove(0))
3606         };
3607
3608         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3609         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
3610
3611         expect_pending_htlcs_forwardable!(nodes[1]);
3612
3613         let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
3614         assert_eq!(events_2.len(), 1);
3615         payment_event = SendEvent::from_event(events_2.remove(0));
3616         assert_eq!(payment_event.msgs.len(), 1);
3617
3618         check_added_monitors!(nodes[1], 1);
3619         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
3620         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
3621         check_added_monitors!(nodes[2], 1);
3622         let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3623
3624         // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
3625                 // state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
3626         // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
3627
3628         nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
3629         check_closed_broadcast!(nodes[2], true);
3630         check_added_monitors!(nodes[2], 1);
3631         check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
3632         let commitment_tx = {
3633                 let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
3634                 // Note that we don't bother broadcasting the HTLC-Success transaction here as we have
3635                 // no use for it unless nodes[2] learns the preimage somehow; otherwise the funds will
3636                 // go back to nodes[1] upon timeout.
3637                 assert_eq!(node_txn.len(), 1);
3638                 node_txn.remove(0)
3639         };
3640
3641         mine_transaction(&nodes[1], &commitment_tx);
3642
3643         // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
3644         check_closed_broadcast!(nodes[1], true);
3645         check_added_monitors!(nodes[1], 1);
3646         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
3647
3648         // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
3649         {
3650                 get_monitor!(nodes[2], payment_event.commitment_msg.channel_id)
3651                         .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger);
3652         }
3653         mine_transaction(&nodes[2], &commitment_tx);
3654         let mut node_txn = nodes[2].tx_broadcaster.txn_broadcast();
3655         assert_eq!(node_txn.len(), if nodes[2].connect_style.borrow().updates_best_block_first() { 2 } else { 1 });
3656         let htlc_tx = node_txn.pop().unwrap();
3657         assert_eq!(htlc_tx.input.len(), 1);
3658         assert_eq!(htlc_tx.input[0].previous_output.txid, commitment_tx.txid());
3659         assert_eq!(htlc_tx.lock_time, LockTime::ZERO); // Must be an HTLC-Success
3660         assert_eq!(htlc_tx.input[0].witness.len(), 5); // Must be an HTLC-Success
3661
3662         check_spends!(htlc_tx, commitment_tx);
3663 }
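// A small sketch (an editor's illustration, following the assertions above) of how an HTLC-Success
// spend is distinguished from an HTLC-Timeout spend: both carry a 5-element witness, but only
// HTLC-Timeout transactions set a non-zero lock_time (the HTLC's cltv_expiry), so a zero lock_time
// identifies HTLC-Success. The helper name is hypothetical.
#[allow(dead_code)]
fn looks_like_htlc_success(tx: &Transaction) -> bool {
	tx.lock_time == LockTime::ZERO && tx.input.len() == 1 && tx.input[0].witness.len() == 5
}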
3664
3665 #[test]
3666 fn test_dup_events_on_peer_disconnect() {
3667         // Test that if we receive a duplicative update_fulfill_htlc message after a reconnect we do
3668         // not generate a corresponding duplicative PaymentSent event. This was not always the case, as
3669         // we previously generated the event immediately upon receipt of the payment preimage in the
3670         // update_fulfill_htlc message.
3671
3672         let chanmon_cfgs = create_chanmon_cfgs(2);
3673         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3674         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3675         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3676         create_announced_chan_between_nodes(&nodes, 0, 1);
3677
3678         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
3679
3680         nodes[1].node.claim_funds(payment_preimage);
3681         expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
3682         check_added_monitors!(nodes[1], 1);
3683         let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3684         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
3685         expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
3686
3687         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3688         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3689
3690         let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3691         reconnect_args.pending_htlc_claims.0 = 1;
3692         reconnect_nodes(reconnect_args);
3693         expect_payment_path_successful!(nodes[0]);
3694 }
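// Editor's note on the ReconnectArgs tuple convention, inferred from its uses in this file: the
// `.0` slot of fields like `pending_htlc_claims` and `pending_raa` covers retransmissions that get
// re-delivered *to* node A (the first node passed to ReconnectArgs::new), and `.1` those delivered
// to node B. Above, pending_htlc_claims.0 = 1 because nodes[1] must re-send its update_fulfill_htlc
// to nodes[0] on reconnect.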
3695
3696 #[test]
3697 fn test_peer_disconnected_before_funding_broadcasted() {
3698         // Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
3699         // before the funding transaction has been broadcasted and doesn't reconnect within the allotted time.
3700         let chanmon_cfgs = create_chanmon_cfgs(2);
3701         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3702         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3703         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3704
3705         // Open a channel between `nodes[0]` and `nodes[1]`, for which the funding transaction is never
3706         // broadcasted, even though it's created by `nodes[0]`.
3707         let expected_temporary_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
3708         let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
3709         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
3710         let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
3711         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
3712
3713         let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
3714         assert_eq!(temporary_channel_id, expected_temporary_channel_id);
3715
3716         assert!(nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
3717
3718         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
3719         assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id);
3720
3721         // Even though the funding transaction is created by `nodes[0]`, the `FundingCreated` msg is
3722         // never sent to `nodes[1]`, and therefore the tx is never signed by either party nor
3723         // broadcasted.
3724         {
3725                 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
3726         }
3727
3728         // The peers disconnect before the funding is broadcasted.
3729         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3730         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3731
3732         // The time for peers to reconnect expires.
3733         for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
3734                 nodes[0].node.timer_tick_occurred();
3735         }
3736
3737         // Ensure that the channel is closed with `ClosureReason::HolderForceClosed`
3738         // when the peers are disconnected and do not reconnect before the funding
3739         // transaction is broadcasted.
3740         check_closed_event!(&nodes[0], 2, ClosureReason::HolderForceClosed, true,
3741                 [nodes[1].node.get_our_node_id()], 1000000);
3742         check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false,
3743                 [nodes[0].node.get_our_node_id()], 1000000);
3744 }
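// A tiny hypothetical helper (an editor's illustration of the pattern above): an unfunded channel
// whose peer stays disconnected is force-closed once UNFUNDED_CHANNEL_AGE_LIMIT_TICKS timer ticks
// elapse, so driving timer_tick_occurred that many times is enough to trigger the close.
#[allow(dead_code)]
fn let_unfunded_channel_age_out<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>) {
	for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
		node.node.timer_tick_occurred();
	}
}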
3745
3746 #[test]
3747 fn test_simple_peer_disconnect() {
3748         // Test that we can reconnect when there are no lost messages
3749         let chanmon_cfgs = create_chanmon_cfgs(3);
3750         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3751         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3752         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3753         create_announced_chan_between_nodes(&nodes, 0, 1);
3754         create_announced_chan_between_nodes(&nodes, 1, 2);
3755
3756         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3757         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3758         let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3759         reconnect_args.send_channel_ready = (true, true);
3760         reconnect_nodes(reconnect_args);
3761
3762         let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3763         let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3764         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
3765         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);
3766
3767         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3768         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3769         reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3770
3771         let (payment_preimage_3, payment_hash_3, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
3772         let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3773         let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3774         let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3775
3776         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3777         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3778
3779         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
3780         fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);
3781
3782         let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3783         reconnect_args.pending_cell_htlc_fails.0 = 1;
3784         reconnect_args.pending_cell_htlc_claims.0 = 1;
3785         reconnect_nodes(reconnect_args);
3786         {
3787                 let events = nodes[0].node.get_and_clear_pending_events();
3788                 assert_eq!(events.len(), 4);
3789                 match events[0] {
3790                         Event::PaymentSent { payment_preimage, payment_hash, .. } => {
3791                                 assert_eq!(payment_preimage, payment_preimage_3);
3792                                 assert_eq!(payment_hash, payment_hash_3);
3793                         },
3794                         _ => panic!("Unexpected event"),
3795                 }
3796                 match events[1] {
3797                         Event::PaymentPathSuccessful { .. } => {},
3798                         _ => panic!("Unexpected event"),
3799                 }
3800                 match events[2] {
3801                         Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } => {
3802                                 assert_eq!(payment_hash, payment_hash_5);
3803                                 assert!(payment_failed_permanently);
3804                         },
3805                         _ => panic!("Unexpected event"),
3806                 }
3807                 match events[3] {
3808                         Event::PaymentFailed { payment_hash, .. } => {
3809                                 assert_eq!(payment_hash, payment_hash_5);
3810                         },
3811                         _ => panic!("Unexpected event"),
3812                 }
3813         }
3814         check_added_monitors(&nodes[0], 1);
3815
3816         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
3817         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
3818 }
3819
3820 fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken_lnd: bool) {
3821         // Test that we can reconnect when in-flight HTLC updates get dropped
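	// As a rough map (inferred from the flow below), `messages_delivered` encodes how far the
	// initial HTLC add got before the disconnect:
	//   0 => channel created, but channel_ready not yet delivered
	//   1 => update_add_htlc dropped
	//   2 => update_add_htlc delivered, commitment_signed dropped
	//   3 => commitment_signed delivered; nodes[1]'s RAA + commitment_signed dropped
	//   4 => nodes[1]'s RAA delivered, its commitment_signed dropped
	//   5 => nodes[1]'s commitment_signed delivered; nodes[0]'s final RAA dropped
	//   6 => everything delivered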
3822         let chanmon_cfgs = create_chanmon_cfgs(2);
3823         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3824         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3825         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3826
3827         let mut as_channel_ready = None;
3828         let channel_id = if messages_delivered == 0 {
3829                 let (channel_ready, chan_id, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001);
3830                 as_channel_ready = Some(channel_ready);
3831                 // nodes[1] doesn't receive the channel_ready message (it'll be re-sent on reconnect)
3832                 // Note that we store it so that if we're running with `simulate_broken_lnd` we can deliver
3833                 // it before the channel_reestablish message.
3834                 chan_id
3835         } else {
3836                 create_announced_chan_between_nodes(&nodes, 0, 1).2
3837         };
3838
3839         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000);
3840
3841         let payment_event = {
3842                 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
3843                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
3844                 check_added_monitors!(nodes[0], 1);
3845
3846                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3847                 assert_eq!(events.len(), 1);
3848                 SendEvent::from_event(events.remove(0))
3849         };
3850         assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);
3851
3852         if messages_delivered < 2 {
3853                 // Drop the payment_event messages, and let them get re-generated in reconnect_nodes!
3854         } else {
3855                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3856                 if messages_delivered >= 3 {
3857                         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
3858                         check_added_monitors!(nodes[1], 1);
3859                         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3860
3861                         if messages_delivered >= 4 {
3862                                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
3863                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3864                                 check_added_monitors!(nodes[0], 1);
3865
3866                                 if messages_delivered >= 5 {
3867                                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
3868                                         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
3869                                         // No commitment_signed so get_event_msg's assert(len == 1) passes
3870                                         check_added_monitors!(nodes[0], 1);
3871
3872                                         if messages_delivered >= 6 {
3873                                                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
3874                                                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3875                                                 check_added_monitors!(nodes[1], 1);
3876                                         }
3877                                 }
3878                         }
3879                 }
3880         }
3881
3882         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3883         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3884         if messages_delivered < 3 {
3885                 if simulate_broken_lnd {
3886                         // lnd has a long-standing bug where they send a channel_ready prior to a
3887                         // channel_reestablish if you reconnect prior to channel_ready time.
3888                         //
3889                         // Here we simulate that behavior, delivering a channel_ready immediately on
3890                         // reconnect. Note that we don't bother skipping the now-duplicate channel_ready sent
3891                         // in `reconnect_nodes` but we currently don't fail based on that.
3892                         //
3893                         // See-also <https://github.com/lightningnetwork/lnd/issues/4006>
3894                         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready.as_ref().unwrap().0);
3895                 }
3896                 // Even if the channel_ready messages get exchanged, as long as nothing further was
3897                 // received on either side, both sides will need to resend them.
3898                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3899                 reconnect_args.send_channel_ready = (true, true);
3900                 reconnect_args.pending_htlc_adds.1 = 1;
3901                 reconnect_nodes(reconnect_args);
3902         } else if messages_delivered == 3 {
3903                 // nodes[0] still wants its RAA + commitment_signed
3904                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3905                 reconnect_args.pending_responding_commitment_signed.0 = true;
3906                 reconnect_args.pending_raa.0 = true;
3907                 reconnect_nodes(reconnect_args);
3908         } else if messages_delivered == 4 {
3909                 // nodes[0] still wants its commitment_signed
3910                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3911                 reconnect_args.pending_responding_commitment_signed.0 = true;
3912                 reconnect_nodes(reconnect_args);
3913         } else if messages_delivered == 5 {
3914                 // nodes[1] still wants its final RAA
3915                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3916                 reconnect_args.pending_raa.1 = true;
3917                 reconnect_nodes(reconnect_args);
3918         } else if messages_delivered == 6 {
3919                 // Everything was delivered...
3920                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3921         }
3922
3923         let events_1 = nodes[1].node.get_and_clear_pending_events();
3924         if messages_delivered == 0 {
3925                 assert_eq!(events_1.len(), 2);
3926                 match events_1[0] {
3927                         Event::ChannelReady { .. } => { },
3928                         _ => panic!("Unexpected event"),
3929                 };
3930                 match events_1[1] {
3931                         Event::PendingHTLCsForwardable { .. } => { },
3932                         _ => panic!("Unexpected event"),
3933                 };
3934         } else {
3935                 assert_eq!(events_1.len(), 1);
3936                 match events_1[0] {
3937                         Event::PendingHTLCsForwardable { .. } => { },
3938                         _ => panic!("Unexpected event"),
3939                 };
3940         }
3941
3942         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3943         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3944         reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3945
3946         nodes[1].node.process_pending_htlc_forwards();
3947
3948         let events_2 = nodes[1].node.get_and_clear_pending_events();
3949         assert_eq!(events_2.len(), 1);
3950         match events_2[0] {
3951                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
3952                         assert_eq!(payment_hash_1, *payment_hash);
3953                         assert_eq!(amount_msat, 1_000_000);
3954                         assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
3955                         assert_eq!(via_channel_id, Some(channel_id));
3956                         match &purpose {
3957                                 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
3958                                         assert!(payment_preimage.is_none());
3959                                         assert_eq!(payment_secret_1, *payment_secret);
3960                                 },
3961                                 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
3962                         }
3963                 },
3964                 _ => panic!("Unexpected event"),
3965         }
3966
3967         nodes[1].node.claim_funds(payment_preimage_1);
3968         check_added_monitors!(nodes[1], 1);
3969         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
3970
3971         let events_3 = nodes[1].node.get_and_clear_pending_msg_events();
3972         assert_eq!(events_3.len(), 1);
3973         let (update_fulfill_htlc, commitment_signed) = match events_3[0] {
3974                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
3975                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
3976                         assert!(updates.update_add_htlcs.is_empty());
3977                         assert!(updates.update_fail_htlcs.is_empty());
3978                         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
3979                         assert!(updates.update_fail_malformed_htlcs.is_empty());
3980                         assert!(updates.update_fee.is_none());
3981                         (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
3982                 },
3983                 _ => panic!("Unexpected event"),
3984         };
3985
3986         if messages_delivered >= 1 {
3987                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc);
3988
3989                 let events_4 = nodes[0].node.get_and_clear_pending_events();
3990                 assert_eq!(events_4.len(), 1);
3991                 match events_4[0] {
3992                         Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
3993                                 assert_eq!(payment_preimage_1, *payment_preimage);
3994                                 assert_eq!(payment_hash_1, *payment_hash);
3995                         },
3996                         _ => panic!("Unexpected event"),
3997                 }
3998
3999                 if messages_delivered >= 2 {
4000                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
4001                         check_added_monitors!(nodes[0], 1);
4002                         let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4003
4004                         if messages_delivered >= 3 {
4005                                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
4006                                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4007                                 check_added_monitors!(nodes[1], 1);
4008
4009                                 if messages_delivered >= 4 {
4010                                         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed);
4011                                         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4012                                         // No commitment_signed so get_event_msg's assert(len == 1) passes
4013                                         check_added_monitors!(nodes[1], 1);
4014
4015                                         if messages_delivered >= 5 {
4016                                                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
4017                                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4018                                                 check_added_monitors!(nodes[0], 1);
4019                                         }
4020                                 }
4021                         }
4022                 }
4023         }
4024
4025         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4026         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4027         if messages_delivered < 2 {
4028                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4029                 reconnect_args.pending_htlc_claims.0 = 1;
4030                 reconnect_nodes(reconnect_args);
4031                 if messages_delivered < 1 {
4032                         expect_payment_sent!(nodes[0], payment_preimage_1);
4033                 } else {
4034                         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4035                 }
4036         } else if messages_delivered == 2 {
4037                 // nodes[1] still wants its RAA + commitment_signed
4038                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4039                 reconnect_args.pending_responding_commitment_signed.1 = true;
4040                 reconnect_args.pending_raa.1 = true;
4041                 reconnect_nodes(reconnect_args);
4042         } else if messages_delivered == 3 {
4043                 // nodes[1] still wants its commitment_signed
4044                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4045                 reconnect_args.pending_responding_commitment_signed.1 = true;
4046                 reconnect_nodes(reconnect_args);
4047         } else if messages_delivered == 4 {
4048                 // nodes[0] still wants its final RAA
4049                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4050                 reconnect_args.pending_raa.0 = true;
4051                 reconnect_nodes(reconnect_args);
4052         } else if messages_delivered == 5 {
4053                 // Everything was delivered...
4054                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
4055         }
4056
4057         if messages_delivered == 1 || messages_delivered == 2 {
4058                 expect_payment_path_successful!(nodes[0]);
4059         }
4060         if messages_delivered <= 5 {
4061                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4062                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4063         }
4064         reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
4065
4066         if messages_delivered > 2 {
4067                 expect_payment_path_successful!(nodes[0]);
4068         }
4069
4070         // Channel should still work fine...
4071         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
4072         let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
4073         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
4074 }
4075
4076 #[test]
4077 fn test_drop_messages_peer_disconnect_a() {
4078         do_test_drop_messages_peer_disconnect(0, true);
4079         do_test_drop_messages_peer_disconnect(0, false);
4080         do_test_drop_messages_peer_disconnect(1, false);
4081         do_test_drop_messages_peer_disconnect(2, false);
4082 }
4083
4084 #[test]
4085 fn test_drop_messages_peer_disconnect_b() {
4086         do_test_drop_messages_peer_disconnect(3, false);
4087         do_test_drop_messages_peer_disconnect(4, false);
4088         do_test_drop_messages_peer_disconnect(5, false);
4089         do_test_drop_messages_peer_disconnect(6, false);
4090 }
4091
4092 #[test]
4093 fn test_channel_ready_without_best_block_updated() {
4094         // Previously, if we were offline when a funding transaction was locked in, and then came back
4095         // online and called best_block_updated once followed by transactions_confirmed, we'd not
4096         // generate a channel_ready until a later best_block_updated. This tests that we generate the
4097         // channel_ready immediately instead.
4098         let chanmon_cfgs = create_chanmon_cfgs(2);
4099         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4100         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4101         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4102         *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
4103
4104         let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
4105
4106         let conf_height = nodes[0].best_block_info().1 + 1;
4107         connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
4108         let block_txn = [funding_tx];
4109         let conf_txn: Vec<_> = block_txn.iter().enumerate().collect();
4110         let conf_block_header = nodes[0].get_block_header(conf_height);
4111         nodes[0].node.transactions_confirmed(&conf_block_header, &conf_txn[..], conf_height);
4112
4113         // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
4114         let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
4115         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
4116 }
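// Editor's note: the flow above mirrors a node coming back online with a `Confirm`-style chain
// source: best_block_updated runs first (via connect_blocks), then transactions_confirmed supplies
// the already-confirmed funding transaction, and the channel_ready must be generated at that point
// rather than at the next best_block_updated.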
4117
4118 #[test]
4119 fn test_channel_monitor_skipping_block_when_channel_manager_is_leading() {
4120         let chanmon_cfgs = create_chanmon_cfgs(2);
4121         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4122         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4123         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4124
4125         // Let channel_manager get ahead of chain_monitor by 1 block.
4126         // This emulates a race condition where a newly added channel_monitor skips processing 1 block,
4127         // in the case where the client calls block_connect on channel_manager first and then on chain_monitor.
4128         let height_1 = nodes[0].best_block_info().1 + 1;
4129         let mut block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
4130
4131         nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
4132         nodes[0].node.block_connected(&block_1, height_1);
4133
4134         // Create channel, and it gets added to chain_monitor in funding_created.
4135         let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
4136
4137         // Now, the newly added channel_monitor in chain_monitor hasn't processed block_1,
4138         // but its best_block is block_1, since that was populated by channel_manager, and channel_manager
4139         // was running ahead of chain_monitor at the time of funding_created.
4140         // Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
4141         // Hence, this channel's channel_monitor skipped block_1 and directly tries to process subsequent blocks.
4142         confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
4143         connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
4144
4145         // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
4146         let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
4147         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
4148 }
4149
4150 #[test]
4151 fn test_channel_monitor_skipping_block_when_channel_manager_is_lagging() {
4152         let chanmon_cfgs = create_chanmon_cfgs(2);
4153         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4154         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4155         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4156
4157         // Let chain_monitor get ahead of channel_manager by 1 block.
4158         // This emulates a race condition where a newly added channel_monitor skips processing 1 block,
4159         // in the case where the client calls block_connect on chain_monitor first and then on channel_manager.
4160         let height_1 = nodes[0].best_block_info().1 + 1;
4161         let mut block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
4162
4163         nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
4164         nodes[0].chain_monitor.chain_monitor.block_connected(&block_1, height_1);
4165
4166         // Create channel, and it gets added to chain_monitor in funding_created.
4167         let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
4168
4169         // channel_manager can't really skip block_1; it should get it eventually.
4170         nodes[0].node.block_connected(&block_1, height_1);
4171
4172         // Now, the newly added channel_monitor in chain_monitor hasn't processed block_1, and its
4173         // best_block is the block before block_1, since that was populated by channel_manager, and
4174         // channel_manager was running behind at the time of funding_created.
4175         // Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
4176         // Hence, this channel's channel_monitor skipped block_1 and directly tries to process subsequent blocks.
4177         confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
4178         connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
4179
4180         // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
4181         let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
4182         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
4183 }
4184
4185 #[test]
4186 fn test_drop_messages_peer_disconnect_dual_htlc() {
4187         // Test that we can handle reconnecting when both sides of a channel have pending
4188         // commitment_updates when we disconnect.
4189         let chanmon_cfgs = create_chanmon_cfgs(2);
4190         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4191         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4192         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4193         create_announced_chan_between_nodes(&nodes, 0, 1);
4194
4195         let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
4196
4197         // Now try to send a second payment which will fail to send
4198         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
4199         nodes[0].node.send_payment_with_route(&route, payment_hash_2,
4200                 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
4201         check_added_monitors!(nodes[0], 1);
4202
4203         let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
4204         assert_eq!(events_1.len(), 1);
4205         match events_1[0] {
4206                 MessageSendEvent::UpdateHTLCs { .. } => {},
4207                 _ => panic!("Unexpected event"),
4208         }
4209
4210         nodes[1].node.claim_funds(payment_preimage_1);
4211         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
4212         check_added_monitors!(nodes[1], 1);
4213
4214         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
4215         assert_eq!(events_2.len(), 1);
4216         match events_2[0] {
4217                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
4218                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
4219                         assert!(update_add_htlcs.is_empty());
4220                         assert_eq!(update_fulfill_htlcs.len(), 1);
4221                         assert!(update_fail_htlcs.is_empty());
4222                         assert!(update_fail_malformed_htlcs.is_empty());
4223                         assert!(update_fee.is_none());
4224
4225                         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
4226                         let events_3 = nodes[0].node.get_and_clear_pending_events();
4227                         assert_eq!(events_3.len(), 1);
4228                         match events_3[0] {
4229                                 Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
4230                                         assert_eq!(*payment_preimage, payment_preimage_1);
4231                                         assert_eq!(*payment_hash, payment_hash_1);
4232                                 },
4233                                 _ => panic!("Unexpected event"),
4234                         }
4235
4236                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
4237                         let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4238                         // No commitment_signed so get_event_msg's assert(len == 1) passes
4239                         check_added_monitors!(nodes[0], 1);
4240                 },
4241                 _ => panic!("Unexpected event"),
4242         }
4243
4244         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4245         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4246
4247         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
4248                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
4249         }, true).unwrap();
4250         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
4251         assert_eq!(reestablish_1.len(), 1);
4252         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
4253                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
4254         }, false).unwrap();
4255         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
4256         assert_eq!(reestablish_2.len(), 1);
4257
4258         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
4259         let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
4260         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
4261         let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
4262
4263         assert!(as_resp.0.is_none());
4264         assert!(bs_resp.0.is_none());
4265
4266         assert!(bs_resp.1.is_none());
4267         assert!(bs_resp.2.is_none());
4268
4269         assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
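	// RAACommitmentOrder::CommitmentFirst indicates nodes[0] will retransmit its commitment update
	// (the pending update_add_htlc + commitment_signed) before its revoke_and_ack, which is the
	// order the messages are re-delivered in below.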
4270
4271         assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1);
4272         assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty());
4273         assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
4274         assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
4275         assert!(as_resp.2.as_ref().unwrap().update_fee.is_none());
4276         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]);
4277         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed);
4278         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4279         // No commitment_signed so get_event_msg's assert(len == 1) passes
4280         check_added_monitors!(nodes[1], 1);
4281
4282         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap());
4283         let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4284         assert!(bs_second_commitment_signed.update_add_htlcs.is_empty());
4285         assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty());
4286         assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty());
4287         assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty());
4288         assert!(bs_second_commitment_signed.update_fee.is_none());
4289         check_added_monitors!(nodes[1], 1);
4290
4291         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
4292         let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4293         assert!(as_commitment_signed.update_add_htlcs.is_empty());
4294         assert!(as_commitment_signed.update_fulfill_htlcs.is_empty());
4295         assert!(as_commitment_signed.update_fail_htlcs.is_empty());
4296         assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty());
4297         assert!(as_commitment_signed.update_fee.is_none());
4298         check_added_monitors!(nodes[0], 1);
4299
4300         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed);
4301         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4302         // No commitment_signed so get_event_msg's assert(len == 1) passes
4303         check_added_monitors!(nodes[0], 1);
4304
4305         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed);
4306         let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4307         // No commitment_signed so get_event_msg's assert(len == 1) passes
4308         check_added_monitors!(nodes[1], 1);
4309
4310         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
4311         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4312         check_added_monitors!(nodes[1], 1);
4313
4314         expect_pending_htlcs_forwardable!(nodes[1]);
4315
4316         let events_5 = nodes[1].node.get_and_clear_pending_events();
4317         assert_eq!(events_5.len(), 1);
4318         match events_5[0] {
4319                 Event::PaymentClaimable { ref payment_hash, ref purpose, .. } => {
4320                         assert_eq!(payment_hash_2, *payment_hash);
4321                         match &purpose {
4322                                 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
4323                                         assert!(payment_preimage.is_none());
4324                                         assert_eq!(payment_secret_2, *payment_secret);
4325                                 },
4326                                 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
4327                         }
4328                 },
4329                 _ => panic!("Unexpected event"),
4330         }
4331
4332         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
4333         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4334         check_added_monitors!(nodes[0], 1);
4335
4336         expect_payment_path_successful!(nodes[0]);
4337         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
4338 }
4339
4340 fn do_test_htlc_timeout(send_partial_mpp: bool) {
4341         // If the user fails to claim/fail an HTLC within the HTLC CLTV timeout we fail it for them
4342         // to avoid our counterparty failing the channel.
4343         let chanmon_cfgs = create_chanmon_cfgs(2);
4344         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4345         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4346         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4347
4348         create_announced_chan_between_nodes(&nodes, 0, 1);
4349
4350         let our_payment_hash = if send_partial_mpp {
4351                 let (route, our_payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000);
4352                 // Use the utility function send_payment_along_path to send the payment with MPP data which
4353                 // indicates there are more HTLCs coming.
4354                 let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
4355                 let payment_id = PaymentId([42; 32]);
4356                 let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
4357                         RecipientOnionFields::secret_only(payment_secret), payment_id, &route).unwrap();
4358                 nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash,
4359                         RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id,
4360                         &None, session_privs[0]).unwrap();
4361                 check_added_monitors!(nodes[0], 1);
4362                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
4363                 assert_eq!(events.len(), 1);
4364                 // Now do the relevant commitment_signed/RAA dances along the path, noting that the final
4365                 // hop should *not* yet generate any PaymentClaimable event(s).
4366                 pass_along_path(&nodes[0], &[&nodes[1]], 100000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
4367                 our_payment_hash
4368         } else {
4369                 route_payment(&nodes[0], &[&nodes[1]], 100000).1
4370         };
4371
4372         let mut block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
4373         connect_block(&nodes[0], &block);
4374         connect_block(&nodes[1], &block);
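	// Roughly (an editor's note): the HTLC added above expires TEST_FINAL_CLTV blocks after the
	// height at which it was sent, and the holder fails it back once the chain comes within
	// CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS blocks of that expiry, hence block_count below.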
4375         let block_count = TEST_FINAL_CLTV + CHAN_CONFIRM_DEPTH + 2 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS;
4376         for _ in CHAN_CONFIRM_DEPTH + 2..block_count {
4377                 block.header.prev_blockhash = block.block_hash();
4378                 connect_block(&nodes[0], &block);
4379                 connect_block(&nodes[1], &block);
4380         }
4381
4382         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
4383
4384         check_added_monitors!(nodes[1], 1);
4385         let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4386         assert!(htlc_timeout_updates.update_add_htlcs.is_empty());
4387         assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1);
4388         assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty());
4389         assert!(htlc_timeout_updates.update_fee.is_none());
4390
4391         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]);
4392         commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false);
4393         // 100_000 msat as u64, followed by the height at which we failed back above
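	// (0x4000 | 15 is PERM|incorrect_or_unknown_payment_details; per BOLT 4 its failure data is the
	// htlc amount followed by the current block height, matching the bytes assembled below.)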
4394         let mut expected_failure_data = (100_000 as u64).to_be_bytes().to_vec();
4395         expected_failure_data.extend_from_slice(&(block_count - 1).to_be_bytes());
4396         expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]);
4397 }
4398
4399 #[test]
4400 fn test_htlc_timeout() {
4401         do_test_htlc_timeout(true);
4402         do_test_htlc_timeout(false);
4403 }
4404
4405 fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
4406         // Tests that HTLCs in the holding cell are timed out after the requisite number of blocks.
4407         let chanmon_cfgs = create_chanmon_cfgs(3);
4408         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
4409         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
4410         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
4411         create_announced_chan_between_nodes(&nodes, 0, 1);
4412         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4413
4414         // Make sure all nodes are at the same starting height
4415         connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
4416         connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
4417         connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
4418
4419         // Route a first payment to get the 1 -> 2 channel in awaiting_raa...
4420         let (route, first_payment_hash, _, first_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
4421         nodes[1].node.send_payment_with_route(&route, first_payment_hash,
4422                 RecipientOnionFields::secret_only(first_payment_secret), PaymentId(first_payment_hash.0)).unwrap();
4423         assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
4424         check_added_monitors!(nodes[1], 1);
4425
4426         // Now attempt to route a second payment, which should be placed in the holding cell
4427         let sending_node = if forwarded_htlc { &nodes[0] } else { &nodes[1] };
4428         let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(sending_node, nodes[2], 100000);
4429         sending_node.node.send_payment_with_route(&route, second_payment_hash,
4430                 RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap();
4431         if forwarded_htlc {
4432                 check_added_monitors!(nodes[0], 1);
4433                 let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
4434                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
4435                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
4436                 expect_pending_htlcs_forwardable!(nodes[1]);
4437         }
4438         check_added_monitors!(nodes[1], 0);
4439
4440         connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS);
4441         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4442         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
4443         connect_blocks(&nodes[1], 1);
4444
4445         if forwarded_htlc {
4446                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
4447                 check_added_monitors!(nodes[1], 1);
4448                 let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
4449                 assert_eq!(fail_commit.len(), 1);
4450                 match fail_commit[0] {
4451                         MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, ref commitment_signed, .. }, .. } => {
4452                                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
4453                                 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, true, true);
4454                         },
4455                         _ => unreachable!(),
4456                 }
4457                 expect_payment_failed_with_update!(nodes[0], second_payment_hash, false, chan_2.0.contents.short_channel_id, false);
4458         } else {
4459                 expect_payment_failed!(nodes[1], second_payment_hash, false);
4460         }
4461 }
4462
4463 #[test]
4464 fn test_holding_cell_htlc_add_timeouts() {
4465         do_test_holding_cell_htlc_add_timeouts(false);
4466         do_test_holding_cell_htlc_add_timeouts(true);
4467 }
4468
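// Drains any pending `Event::SpendableOutputs` from the node's `ChainMonitor` and, for each
// descriptor, asks the keys manager to build a transaction spending it to an OP_RETURN
// script at a feerate of 253 sat/kW. If more than one descriptor was returned, it also
// attempts a single transaction sweeping all of them together. Returns the resulting
// transactions for inspection.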
4469 macro_rules! check_spendable_outputs {
4470         ($node: expr, $keysinterface: expr) => {
4471                 {
4472                         let mut events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events();
4473                         let mut txn = Vec::new();
4474                         let mut all_outputs = Vec::new();
4475                         let secp_ctx = Secp256k1::new();
4476                         for event in events.drain(..) {
4477                                 match event {
4478                                         Event::SpendableOutputs { mut outputs, channel_id: _ } => {
4479                                                 for outp in outputs.drain(..) {
4480                                                         txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx).unwrap());
4481                                                         all_outputs.push(outp);
4482                                                 }
4483                                         },
4484                                         _ => panic!("Unexpected event"),
4485                                 };
4486                         }
4487                         if all_outputs.len() > 1 {
4488                                 if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs(&all_outputs.iter().map(|a| a).collect::<Vec<_>>(), Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx) {
4489                                         txn.push(tx);
4490                                 }
4491                         }
4492                         txn
4493                 }
4494         }
4495 }
4496
4497 #[test]
4498 fn test_claim_sizeable_push_msat() {
4499         // Incidentally tests SpendableOutputs event generation due to detection of the to_local output on the commitment tx
4500         let chanmon_cfgs = create_chanmon_cfgs(2);
4501         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4502         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4503         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4504
4505         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
4506         nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
4507         check_closed_broadcast!(nodes[1], true);
4508         check_added_monitors!(nodes[1], 1);
4509         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
4510         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4511         assert_eq!(node_txn.len(), 1);
4512         check_spends!(node_txn[0], chan.3);
4513         assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
4514
4515         mine_transaction(&nodes[1], &node_txn[0]);
4516         connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
4517
4518         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4519         assert_eq!(spend_txn.len(), 1);
4520         assert_eq!(spend_txn[0].input.len(), 1);
4521         check_spends!(spend_txn[0], node_txn[0]);
4522         assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
4523 }
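// A minimal sketch (illustrative, not part of the test framework) of why the assertion
// above checks that the sweep's nSequence equals BREAKDOWN_TIMEOUT: the to_local output is
// encumbered by OP_CHECKSEQUENCEVERIFY with the to_self_delay selected by the counterparty
// (BREAKDOWN_TIMEOUT in these tests), and the sweep sets nSequence to that delay, so BIP 68
// only admits the spend once the commitment transaction is at least that many blocks deep.
fn example_csv_sweep_allowed(to_self_delay: u16, blocks_since_commitment_conf: u32) -> bool {
        // Both the script-level CSV check and the consensus-level BIP 68 check reduce to
        // this depth comparison once nSequence = to_self_delay.
        blocks_since_commitment_conf >= to_self_delay as u32
}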
4524
4525 #[test]
4526 fn test_claim_on_remote_sizeable_push_msat() {
4527         // Same test as the previous one, but on the remote commitment tx, as per_commitment_point registration changes depending on whether
4528         // you're funder or fundee and the to_remote output is encumbered by a P2WPKH
4529         let chanmon_cfgs = create_chanmon_cfgs(2);
4530         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4531         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4532         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4533
4534         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
4535         nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
4536         check_closed_broadcast!(nodes[0], true);
4537         check_added_monitors!(nodes[0], 1);
4538         check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
4539
4540         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4541         assert_eq!(node_txn.len(), 1);
4542         check_spends!(node_txn[0], chan.3);
4543         assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
4544
4545         mine_transaction(&nodes[1], &node_txn[0]);
4546         check_closed_broadcast!(nodes[1], true);
4547         check_added_monitors!(nodes[1], 1);
4548         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4549         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4550
4551         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4552         assert_eq!(spend_txn.len(), 1);
4553         check_spends!(spend_txn[0], node_txn[0]);
4554 }
4555
4556 #[test]
4557 fn test_claim_on_remote_revoked_sizeable_push_msat() {
4558         // Same test as the previous one, but on the remote revoked commitment tx, as per_commitment_point registration changes depending on whether
4559         // you're funder or fundee and the to_remote output is encumbered by a P2WPKH
4560
4561         let chanmon_cfgs = create_chanmon_cfgs(2);
4562         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4563         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4564         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4565
4566         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000);
4567         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4568         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2);
4569         assert_eq!(revoked_local_txn[0].input.len(), 1);
4570         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
4571
4572         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4573         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4574         check_closed_broadcast!(nodes[1], true);
4575         check_added_monitors!(nodes[1], 1);
4576         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4577
4578         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4579         mine_transaction(&nodes[1], &node_txn[0]);
4580         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4581
4582         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4583         assert_eq!(spend_txn.len(), 3);
4584         check_spends!(spend_txn[0], revoked_local_txn[0]); // to_remote output on revoked remote commitment_tx
4585         check_spends!(spend_txn[1], node_txn[0]);
4586         check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[0]); // Both outputs
4587 }
4588
4589 #[test]
4590 fn test_static_spendable_outputs_preimage_tx() {
4591         let chanmon_cfgs = create_chanmon_cfgs(2);
4592         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4593         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4594         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4595
4596         // Create some initial channels
4597         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4598
4599         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
4600
4601         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4602         assert_eq!(commitment_tx[0].input.len(), 1);
4603         assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
4604
4605         // Settle A's commitment tx on B's chain
4606         nodes[1].node.claim_funds(payment_preimage);
4607         expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
4608         check_added_monitors!(nodes[1], 1);
4609         mine_transaction(&nodes[1], &commitment_tx[0]);
4610         check_added_monitors!(nodes[1], 1);
4611         let events = nodes[1].node.get_and_clear_pending_msg_events();
4612         match events[0] {
4613                 MessageSendEvent::UpdateHTLCs { .. } => {},
4614                 _ => panic!("Unexpected event"),
4615         }
4616         match events[2] {
4617                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4618                 _ => panic!("Unexpected event"),
4619         }
4620
4621         // Check B's monitor was able to send back output descriptor event for preimage tx on A's commitment tx
4622         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: preimage tx
4623         assert_eq!(node_txn.len(), 1);
4624         check_spends!(node_txn[0], commitment_tx[0]);
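        // B claims with the preimage, spending the HTLC that A *offered* on A's own
        // commitment tx, so the revealed witness script is the offered-HTLC script: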
4625         assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
4626
4627         mine_transaction(&nodes[1], &node_txn[0]);
4628         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4629         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4630
4631         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4632         assert_eq!(spend_txn.len(), 1);
4633         check_spends!(spend_txn[0], node_txn[0]);
4634 }
4635
4636 #[test]
4637 fn test_static_spendable_outputs_timeout_tx() {
4638         let chanmon_cfgs = create_chanmon_cfgs(2);
4639         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4640         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4641         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4642
4643         // Create some initial channels
4644         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4645
4646         // Rebalance the channel a bit by relaying one payment through it ...
4647         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
4648
4649         let (_, our_payment_hash, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000);
4650
4651         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4652         assert_eq!(commitment_tx[0].input.len(), 1);
4653         assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
4654
4655         // Settle A's commitment tx on B's chain
4656         mine_transaction(&nodes[1], &commitment_tx[0]);
4657         check_added_monitors!(nodes[1], 1);
4658         let events = nodes[1].node.get_and_clear_pending_msg_events();
4659         match events[1] {
4660                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4661                 _ => panic!("Unexpected event"),
4662         }
4663         connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
4664
4665         // Check B's monitor was able to send back output descriptor event for timeout tx on A's commitment tx
4666         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4667         assert_eq!(node_txn.len(), 1); // ChannelMonitor: timeout tx
4668         check_spends!(node_txn[0], commitment_tx[0].clone());
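        // B offered this HTLC, so from A's perspective it is a received HTLC on A's
        // commitment tx; B's timeout claim therefore reveals the accepted-HTLC script: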
4669         assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4670
4671         mine_transaction(&nodes[1], &node_txn[0]);
4672         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4673         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4674         expect_payment_failed!(nodes[1], our_payment_hash, false);
4675
4676         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4677         assert_eq!(spend_txn.len(), 3); // SpendableOutputs: remote_commitment_tx.to_remote, timeout_tx.output, and one tx spending both
4678         check_spends!(spend_txn[0], commitment_tx[0]);
4679         check_spends!(spend_txn[1], node_txn[0]);
4680         check_spends!(spend_txn[2], node_txn[0], commitment_tx[0]); // All outputs
4681 }
4682
4683 #[test]
4684 fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
4685         let chanmon_cfgs = create_chanmon_cfgs(2);
4686         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4687         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4688         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4689
4690         // Create some initial channels
4691         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4692
4693         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4694         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
4695         assert_eq!(revoked_local_txn[0].input.len(), 1);
4696         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4697
4698         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4699
4700         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4701         check_closed_broadcast!(nodes[1], true);
4702         check_added_monitors!(nodes[1], 1);
4703         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4704
4705         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4706         assert_eq!(node_txn.len(), 1);
4707         assert_eq!(node_txn[0].input.len(), 2);
4708         check_spends!(node_txn[0], revoked_local_txn[0]);
4709
4710         mine_transaction(&nodes[1], &node_txn[0]);
4711         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4712
4713         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4714         assert_eq!(spend_txn.len(), 1);
4715         check_spends!(spend_txn[0], node_txn[0]);
4716 }
4717
4718 #[test]
4719 fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
4720         let mut chanmon_cfgs = create_chanmon_cfgs(2);
4721         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
4722         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4723         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4724         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4725
4726         // Create some initial channels
4727         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4728
4729         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4730         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
4731         assert_eq!(revoked_local_txn[0].input.len(), 1);
4732         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4733
4734         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4735
4736         // A will generate HTLC-Timeout from revoked commitment tx
4737         mine_transaction(&nodes[0], &revoked_local_txn[0]);
4738         check_closed_broadcast!(nodes[0], true);
4739         check_added_monitors!(nodes[0], 1);
4740         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4741         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
4742
4743         let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4744         assert_eq!(revoked_htlc_txn.len(), 1);
4745         assert_eq!(revoked_htlc_txn[0].input.len(), 1);
4746         assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
4747         check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
4748         assert_ne!(revoked_htlc_txn[0].lock_time, LockTime::ZERO); // HTLC-Timeout
4749
4750         // B will generate justice tx from A's revoked commitment/HTLC tx
4751         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
4752         check_closed_broadcast!(nodes[1], true);
4753         check_added_monitors!(nodes[1], 1);
4754         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4755
4756         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4757         assert_eq!(node_txn.len(), 2); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs
4758         // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
4759         // including the one already spent by revoked_htlc_txn[0]. That's OK, we'll spend with valid
4760         // transactions next...
4761         assert_eq!(node_txn[0].input.len(), 3);
4762         check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);
4763
4764         assert_eq!(node_txn[1].input.len(), 2);
4765         check_spends!(node_txn[1], revoked_local_txn[0], revoked_htlc_txn[0]);
4766         if node_txn[1].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
4767                 assert_ne!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4768         } else {
4769                 assert_eq!(node_txn[1].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
4770                 assert_ne!(node_txn[1].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4771         }
4772
4773         mine_transaction(&nodes[1], &node_txn[1]);
4774         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4775
4776         // Check B's ChannelMonitor was able to generate the right spendable output descriptor
4777         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4778         assert_eq!(spend_txn.len(), 1);
4779         assert_eq!(spend_txn[0].input.len(), 1);
4780         check_spends!(spend_txn[0], node_txn[1]);
4781 }
4782
4783 #[test]
4784 fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
4785         let mut chanmon_cfgs = create_chanmon_cfgs(2);
4786         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
4787         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4788         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4789         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4790
4791         // Create some initial channels
4792         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4793
4794         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4795         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
4796         assert_eq!(revoked_local_txn[0].input.len(), 1);
4797         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4798
4799         // The to-be-revoked commitment tx should have one HTLC and one to_remote output
4800         assert_eq!(revoked_local_txn[0].output.len(), 2);
4801
4802         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4803
4804         // B will generate HTLC-Success from revoked commitment tx
4805         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4806         check_closed_broadcast!(nodes[1], true);
4807         check_added_monitors!(nodes[1], 1);
4808         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4809         let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4810
4811         assert_eq!(revoked_htlc_txn.len(), 1);
4812         assert_eq!(revoked_htlc_txn[0].input.len(), 1);
4813         assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4814         check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
4815
4816         // Check that the unspent output (of the two) on revoked_local_txn[0] is a P2WPKH:
4817         let unspent_local_txn_output = revoked_htlc_txn[0].input[0].previous_output.vout as usize ^ 1;
4818         assert_eq!(revoked_local_txn[0].output[unspent_local_txn_output].script_pubkey.len(), 2 + 20); // P2WPKH
4819
4820         // A will generate justice tx from B's revoked commitment/HTLC tx
4821         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
4822         check_closed_broadcast!(nodes[0], true);
4823         check_added_monitors!(nodes[0], 1);
4824         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4825
4826         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4827         assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success
4828
4829         // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
4830         // including the one already spent by revoked_htlc_txn[0]. That's OK, we'll spend with valid
4831         // transactions next...
4832         assert_eq!(node_txn[0].input.len(), 2);
4833         check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);
4834         if node_txn[0].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
4835                 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4836         } else {
4837                 assert_eq!(node_txn[0].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
4838                 assert_eq!(node_txn[0].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4839         }
4840
4841         assert_eq!(node_txn[1].input.len(), 1);
4842         check_spends!(node_txn[1], revoked_htlc_txn[0]);
4843
4844         mine_transaction(&nodes[0], &node_txn[1]);
4845         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
4846
4847         // Note that nodes[0]'s tx_broadcaster is still locked, so if we get here the channelmonitor
4848         // didn't try to generate any new transactions.
4849
4850         // Check A's ChannelMonitor was able to generate the right spendable output descriptor
4851         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
4852         assert_eq!(spend_txn.len(), 3);
4853         assert_eq!(spend_txn[0].input.len(), 1);
4854         check_spends!(spend_txn[0], revoked_local_txn[0]); // spending to_remote output from revoked local tx
4855         assert_ne!(spend_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4856         check_spends!(spend_txn[1], node_txn[1]); // spending justice tx output on the htlc success tx
4857         check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[1]); // Both outputs
4858 }
4859
4860 #[test]
4861 fn test_onchain_to_onchain_claim() {
4862         // Test that, in case of channel closure, we detect the state of the output and claim the HTLC
4863         // on the downstream peer's remote commitment tx.
4864         // First, have C claim an HTLC against its own latest commitment transaction.
4865         // Then, broadcast these to B, which should update the monitor downstream on the A<->B
4866         // channel.
4867         // Finally, check that B will claim the HTLC output if A's latest commitment transaction
4868         // gets broadcast.
4869
4870         let chanmon_cfgs = create_chanmon_cfgs(3);
4871         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
4872         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
4873         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
4874
4875         // Create some initial channels
4876         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4877         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4878
4879         // Ensure all nodes are at the same height
4880         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
4881         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
4882         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
4883         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
4884
4885         // Rebalance the network a bit by relaying one payment through all the channels ...
4886         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
4887         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
4888
4889         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
4890         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
4891         check_spends!(commitment_tx[0], chan_2.3);
4892         nodes[2].node.claim_funds(payment_preimage);
4893         expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
4894         check_added_monitors!(nodes[2], 1);
4895         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
4896         assert!(updates.update_add_htlcs.is_empty());
4897         assert!(updates.update_fail_htlcs.is_empty());
4898         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
4899         assert!(updates.update_fail_malformed_htlcs.is_empty());
4900
4901         mine_transaction(&nodes[2], &commitment_tx[0]);
4902         check_closed_broadcast!(nodes[2], true);
4903         check_added_monitors!(nodes[2], 1);
4904         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4905
4906         let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx)
4907         assert_eq!(c_txn.len(), 1);
4908         check_spends!(c_txn[0], commitment_tx[0]);
4909         assert_eq!(c_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4910         assert!(c_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
4911         assert_eq!(c_txn[0].lock_time, LockTime::ZERO); // Success tx
4912
4913         // Now that C's commitment tx and HTLC-Success tx have been broadcast on B's chain, B should be able to extract the preimage and update the downstream monitor
4914         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), c_txn[0].clone()]));
4915         check_added_monitors!(nodes[1], 1);
4916         let events = nodes[1].node.get_and_clear_pending_events();
4917         assert_eq!(events.len(), 2);
4918         match events[0] {
4919                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
4920                 _ => panic!("Unexpected event"),
4921         }
4922         match events[1] {
4923                 Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
4924                         next_channel_id, outbound_amount_forwarded_msat, ..
4925                 } => {
4926                         assert_eq!(total_fee_earned_msat, Some(1000));
4927                         assert_eq!(prev_channel_id, Some(chan_1.2));
4928                         assert_eq!(claim_from_onchain_tx, true);
4929                         assert_eq!(next_channel_id, Some(chan_2.2));
4930                         assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
4931                 },
4932                 _ => panic!("Unexpected event"),
4933         }
4934         check_added_monitors!(nodes[1], 1);
4935         let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events();
4936         assert_eq!(msg_events.len(), 3);
4937         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut msg_events);
4938         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut msg_events);
4939
4940         match nodes_2_event {
4941                 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {},
4942                 _ => panic!("Unexpected event"),
4943         }
4944
4945         match nodes_0_event {
4946                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
4947                         assert!(update_add_htlcs.is_empty());
4948                         assert!(update_fail_htlcs.is_empty());
4949                         assert_eq!(update_fulfill_htlcs.len(), 1);
4950                         assert!(update_fail_malformed_htlcs.is_empty());
4951                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
4952                 },
4953                 _ => panic!("Unexpected event"),
4954         };
4955
4956         // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
4957         match msg_events[0] {
4958                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4959                 _ => panic!("Unexpected event"),
4960         }
4961
4962         // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
4963         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4964         mine_transaction(&nodes[1], &commitment_tx[0]);
4965         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4966         let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4967         // ChannelMonitor: HTLC-Success tx
4968         assert_eq!(b_txn.len(), 1);
4969         check_spends!(b_txn[0], commitment_tx[0]);
4970         assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
4971         assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
4972         assert_eq!(b_txn[0].lock_time.to_consensus_u32(), nodes[1].best_block_info().1); // Success tx
4973
4974         check_closed_broadcast!(nodes[1], true);
4975         check_added_monitors!(nodes[1], 1);
4976 }
4977
4978 #[test]
4979 fn test_duplicate_payment_hash_one_failure_one_success() {
4980         // Topology : A --> B --> C --> D
4981         // We route two payments with the same hash between B and C; one will time out and the other will be claimed successfully.
4982         // Note that because C will refuse to generate two payment secrets for the same payment hash,
4983         // we forward one of the payments onwards to D.
4984         let chanmon_cfgs = create_chanmon_cfgs(4);
4985         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
4986         // When this test was written, the default base fee floated based on the HTLC count.
4987         // It is now fixed, so we simply set the fee to the expected value here.
4988         let mut config = test_default_channel_config();
4989         config.channel_config.forwarding_fee_base_msat = 196;
4990         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs,
4991                 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
4992         let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
4993
4994         create_announced_chan_between_nodes(&nodes, 0, 1);
4995         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4996         create_announced_chan_between_nodes(&nodes, 2, 3);
4997
4998         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
4999         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
5000         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
5001         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
5002         connect_blocks(&nodes[3], node_max_height - nodes[3].best_block_info().1);
5003
5004         let (our_payment_preimage, duplicate_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000);
5005
5006         let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, None).unwrap();
5007         // We reduce the final CLTV here by a somewhat arbitrary constant to keep it under the one-byte
5008         // script push size limit so that the below script length checks match
5009         // ACCEPTED_HTLC_SCRIPT_WEIGHT.
5010         let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV - 40)
5011                 .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap();
5012         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, 800_000);
5013         send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[2], &nodes[3]]], 800_000, duplicate_payment_hash, payment_secret);
5014
5015         let commitment_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
5016         assert_eq!(commitment_txn[0].input.len(), 1);
5017         check_spends!(commitment_txn[0], chan_2.3);
5018
5019         mine_transaction(&nodes[1], &commitment_txn[0]);
5020         check_closed_broadcast!(nodes[1], true);
5021         check_added_monitors!(nodes[1], 1);
5022         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
5023         connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
5024
5025         let htlc_timeout_tx;
5026         { // Extract one of the two HTLC-Timeout transactions
5027                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5028                 // ChannelMonitor: timeout tx * 2-or-3
5029                 assert!(node_txn.len() == 2 || node_txn.len() == 3);
5030
5031                 check_spends!(node_txn[0], commitment_txn[0]);
5032                 assert_eq!(node_txn[0].input.len(), 1);
5033                 assert_eq!(node_txn[0].output.len(), 1);
5034
5035                 if node_txn.len() > 2 {
5036                         check_spends!(node_txn[1], commitment_txn[0]);
5037                         assert_eq!(node_txn[1].input.len(), 1);
5038                         assert_eq!(node_txn[1].output.len(), 1);
5039                         assert_eq!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
5040
5041                         check_spends!(node_txn[2], commitment_txn[0]);
5042                         assert_eq!(node_txn[2].input.len(), 1);
5043                         assert_eq!(node_txn[2].output.len(), 1);
5044                         assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
5045                 } else {
5046                         check_spends!(node_txn[1], commitment_txn[0]);
5047                         assert_eq!(node_txn[1].input.len(), 1);
5048                         assert_eq!(node_txn[1].output.len(), 1);
5049                         assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
5050                 }
5051
5052                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5053                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5054                 // Assign htlc_timeout_tx to the forwarded HTLC (with value ~800 sats). The received HTLC
5055                 // (with value 900 sats) will be claimed in the below `claim_funds` call.
5056                 if node_txn.len() > 2 {
5057                         assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5058                         htlc_timeout_tx = if node_txn[2].output[0].value < 900 { node_txn[2].clone() } else { node_txn[0].clone() };
5059                 } else {
5060                         htlc_timeout_tx = if node_txn[0].output[0].value < 900 { node_txn[1].clone() } else { node_txn[0].clone() };
5061                 }
5062         }
5063
5064         nodes[2].node.claim_funds(our_payment_preimage);
5065         expect_payment_claimed!(nodes[2], duplicate_payment_hash, 900_000);
5066
5067         mine_transaction(&nodes[2], &commitment_txn[0]);
5068         check_added_monitors!(nodes[2], 2);
5069         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5070         let events = nodes[2].node.get_and_clear_pending_msg_events();
5071         match events[0] {
5072                 MessageSendEvent::UpdateHTLCs { .. } => {},
5073                 _ => panic!("Unexpected event"),
5074         }
5075         match events[2] {
5076                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5077                 _ => panic!("Unexpected event"),
5078         }
5079         let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
5080         assert_eq!(htlc_success_txn.len(), 2); // ChannelMonitor: HTLC-Success txn (*2 due to 2-HTLC outputs)
5081         check_spends!(htlc_success_txn[0], commitment_txn[0]);
5082         check_spends!(htlc_success_txn[1], commitment_txn[0]);
5083         assert_eq!(htlc_success_txn[0].input.len(), 1);
5084         assert_eq!(htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5085         assert_eq!(htlc_success_txn[1].input.len(), 1);
5086         assert_eq!(htlc_success_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5087         assert_ne!(htlc_success_txn[0].input[0].previous_output, htlc_success_txn[1].input[0].previous_output);
5088         assert_ne!(htlc_success_txn[1].input[0].previous_output, htlc_timeout_tx.input[0].previous_output);
5089
5090         mine_transaction(&nodes[1], &htlc_timeout_tx);
5091         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5092         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
5093         let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5094         assert!(htlc_updates.update_add_htlcs.is_empty());
5095         assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
5096         let first_htlc_id = htlc_updates.update_fail_htlcs[0].htlc_id;
5097         assert!(htlc_updates.update_fulfill_htlcs.is_empty());
5098         assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
5099         check_added_monitors!(nodes[1], 1);
5100
5101         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
5102         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
5103         {
5104                 commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true);
5105         }
5106         expect_payment_failed_with_update!(nodes[0], duplicate_payment_hash, false, chan_2.0.contents.short_channel_id, true);
5107
5108         // Settle the 2nd HTLC by broadcasting C's HTLC-Success tx on B's chain
5109         mine_transaction(&nodes[1], &htlc_success_txn[1]);
5110         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(196), true, true);
5111         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5112         assert!(updates.update_add_htlcs.is_empty());
5113         assert!(updates.update_fail_htlcs.is_empty());
5114         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
5115         assert_ne!(updates.update_fulfill_htlcs[0].htlc_id, first_htlc_id);
5116         assert!(updates.update_fail_malformed_htlcs.is_empty());
5117         check_added_monitors!(nodes[1], 1);
5118
5119         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
5120         commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false);
5121         expect_payment_sent(&nodes[0], our_payment_preimage, None, true, true);
5122 }
5123
5124 #[test]
5125 fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
5126         let chanmon_cfgs = create_chanmon_cfgs(2);
5127         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5128         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5129         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5130
5131         // Create some initial channels
5132         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5133
5134         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
5135         let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
5136         assert_eq!(local_txn.len(), 1);
5137         assert_eq!(local_txn[0].input.len(), 1);
5138         check_spends!(local_txn[0], chan_1.3);
5139
5140         // Give B knowledge of preimage to be able to generate a local HTLC-Success Tx
5141         nodes[1].node.claim_funds(payment_preimage);
5142         expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
5143         check_added_monitors!(nodes[1], 1);
5144
5145         mine_transaction(&nodes[1], &local_txn[0]);
5146         check_added_monitors!(nodes[1], 1);
5147         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
5148         let events = nodes[1].node.get_and_clear_pending_msg_events();
5149         match events[0] {
5150                 MessageSendEvent::UpdateHTLCs { .. } => {},
5151                 _ => panic!("Unexpected event"),
5152         }
5153         match events[2] {
5154                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5155                 _ => panic!("Unexpected event"),
5156         }
5157         let node_tx = {
5158                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5159                 assert_eq!(node_txn.len(), 1);
5160                 assert_eq!(node_txn[0].input.len(), 1);
5161                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5162                 check_spends!(node_txn[0], local_txn[0]);
5163                 node_txn[0].clone()
5164         };
5165
5166         mine_transaction(&nodes[1], &node_tx);
5167         connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
5168
5169         // Verify that B is able to spend its own HTLC-Success tx thanks to spendable output event given back by its ChannelMonitor
5170         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5171         assert_eq!(spend_txn.len(), 1);
5172         assert_eq!(spend_txn[0].input.len(), 1);
5173         check_spends!(spend_txn[0], node_tx);
5174         assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5175 }
5176
5177 fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, announce_latest: bool) {
5178         // Test that we fail backwards the full set of HTLCs we need to when remote broadcasts an
5179         // unrevoked commitment transaction.
5180         // This includes HTLCs which were below the dust threshold as well as HTLCs which were awaiting
5181         // a remote RAA before they could be failed backwards (and combinations thereof).
5182         // We also test duplicate-hash HTLCs by adding two nodes on each side of the target nodes which
5183         // use the same payment hashes.
5184         // Thus, we use a six-node network:
5185         //
5186         // A \         / E
5187         //    - C - D -
5188         // B /         \ F
5189         // and test the case where C fails back to A/B when D announces its latest commitment transaction
5190         let chanmon_cfgs = create_chanmon_cfgs(6);
5191         let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
5192         // When this test was written, the default base fee floated based on the HTLC count.
5193         // It is now fixed, so we simply set the fee to the expected value here.
5194         let mut config = test_default_channel_config();
5195         config.channel_config.forwarding_fee_base_msat = 196;
5196         let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs,
5197                 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
5198         let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
5199
5200         let _chan_0_2 = create_announced_chan_between_nodes(&nodes, 0, 2);
5201         let _chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
5202         let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
5203         let chan_3_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
5204         let chan_3_5  = create_announced_chan_between_nodes(&nodes, 3, 5);
5205
5206         // Rebalance and check output sanity...
5207         send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
5208         send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000);
5209         assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
5210
5211         let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
5212                 .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context().holder_dust_limit_satoshis;
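        // A minimal sketch (with illustrative constants, not values read from the test
        // framework) of the BOLT 3 trimming rule the "not added < dust limit + HTLC tx fee"
        // comments below rely on: for non-anchor channels, a received HTLC only materializes
        // as a commitment tx output if its value, less the HTLC-success tx fee, meets the
        // holder's dust limit.
        fn example_received_htlc_is_trimmed(amount_sat: u64, dust_limit_sat: u64, feerate_per_kw: u64) -> bool {
                const HTLC_SUCCESS_TX_WEIGHT: u64 = 703; // pre-anchor HTLC-success weight per BOLT 3
                amount_sat < dust_limit_sat + feerate_per_kw * HTLC_SUCCESS_TX_WEIGHT / 1000
        }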
5213         // 0th HTLC:
5214         let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5215         // 1st HTLC:
5216         let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5217         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
5218         // 2nd HTLC:
5219         send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5220         // 3rd HTLC:
5221         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5222         // 4th HTLC:
5223         let (_, payment_hash_3, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5224         // 5th HTLC:
5225         let (_, payment_hash_4, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5226         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
5227         // 6th HTLC:
5228         send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200, None).unwrap());
5229         // 7th HTLC:
5230         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200, None).unwrap());
5231
5232         // 8th HTLC:
5233         let (_, payment_hash_5, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5234         // 9th HTLC:
5235         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
5236         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5237
5238         // 10th HTLC:
5239         let (_, payment_hash_6, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5240         // 11th HTLC:
5241         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
5242         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200, None).unwrap());
5243
5244         // Double-check that six of the new HTLCs were added
5245         // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie,
5246         // with the to_local and to_remote outputs, 8 outputs on the commitment tx, the 6 under-dust HTLCs not included).
5247         assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2).len(), 1);
5248         assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 8);
5249
5250         // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
5251         // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
5252         nodes[4].node.fail_htlc_backwards(&payment_hash_1);
5253         nodes[4].node.fail_htlc_backwards(&payment_hash_3);
5254         nodes[4].node.fail_htlc_backwards(&payment_hash_5);
5255         nodes[4].node.fail_htlc_backwards(&payment_hash_6);
5256         check_added_monitors!(nodes[4], 0);
5257
5258         let failed_destinations = vec![
5259                 HTLCDestination::FailedPayment { payment_hash: payment_hash_1 },
5260                 HTLCDestination::FailedPayment { payment_hash: payment_hash_3 },
5261                 HTLCDestination::FailedPayment { payment_hash: payment_hash_5 },
5262                 HTLCDestination::FailedPayment { payment_hash: payment_hash_6 },
5263         ];
5264         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations);
5265         check_added_monitors!(nodes[4], 1);
5266
5267         let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
5268         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[0]);
5269         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[1]);
5270         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[2]);
5271         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[3]);
5272         commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false);
5273
5274         // Fail 3rd below-dust and 7th above-dust HTLCs
5275         nodes[5].node.fail_htlc_backwards(&payment_hash_2);
5276         nodes[5].node.fail_htlc_backwards(&payment_hash_4);
5277         check_added_monitors!(nodes[5], 0);
5278
5279         let failed_destinations_2 = vec![
5280                 HTLCDestination::FailedPayment { payment_hash: payment_hash_2 },
5281                 HTLCDestination::FailedPayment { payment_hash: payment_hash_4 },
5282         ];
5283         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2);
5284         check_added_monitors!(nodes[5], 1);
5285
5286         let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
5287         nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[0]);
5288         nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]);
5289         commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
5290
5291         let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
5292
5293         // After the 4 and 2 removes above in nodes[4] and nodes[5] respectively, nodes[3] should receive 6 HTLCHandlingFailed events
5294         let failed_destinations_3 = vec![
5295                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5296                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5297                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5298                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5299                 HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
5300                 HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
5301         ];
5302         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3);
5303         check_added_monitors!(nodes[3], 1);
5304         let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
5305         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
5306         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[1]);
5307         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[2]);
5308         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[3]);
5309         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[4]);
5310         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[5]);
5311         if deliver_last_raa {
5312                 commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false);
5313         } else {
5314                 let _cs_last_raa = commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false, true, false, true);
5315         }
5316
5317         // D's latest commitment transaction now contains 1st + 2nd + 9th HTLCs (implicitly, they're
5318         // below the dust limit) and the 5th + 6th + 11th HTLCs. It has failed back the 0th, 3rd, 4th,
5319         // 7th, 8th, and 10th, but as we haven't yet delivered the final RAA to C, the fails haven't
5320         // propagated back to A/B yet (and D has two unrevoked commitment transactions).
5321         //
5322         // We now broadcast the latest commitment transaction, which *should* result in failures for
5323         // the 0th, 1st, 2nd, 3rd, 4th, 7th, 8th, 9th, and 10th HTLCs, ie all the below-dust HTLCs and
5324         // the non-broadcast above-dust HTLCs.
5325         //
5326         // Alternatively, we may broadcast the previous commitment transaction, which should only
5327         // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
5328         let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
5329
5330         if announce_latest {
5331                 mine_transaction(&nodes[2], &ds_last_commitment_tx[0]);
5332         } else {
5333                 mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
5334         }
5335         let events = nodes[2].node.get_and_clear_pending_events();
5336         assert_eq!(events.len(), if deliver_last_raa { 2 + 6 } else { 1 });
5337         let close_event = events.last().unwrap();
5343         match close_event {
5344                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
5345                 _ => panic!("Unexpected event"),
5346         }
5347
5348         connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
5349         check_closed_broadcast!(nodes[2], true);
5350         if deliver_last_raa {
5351                 expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true);
5352
5353                 let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
5354                 expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
5355         } else {
5356                 let expected_destinations: Vec<HTLCDestination> = if announce_latest {
5357                         repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect()
5358                 } else {
5359                         repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect()
5360                 };
5361
5362                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations);
5363         }
5364         check_added_monitors!(nodes[2], 3);
5365
5366         let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
5367         assert_eq!(cs_msgs.len(), 2);
5368         let mut a_done = false;
5369         for msg in cs_msgs {
5370                 match msg {
5371                         MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
5372                                 // Both under-dust HTLCs and the one above-dust HTLC that we had already failed
5373                                 // should be failed-backwards here.
5374                                 let target = if *node_id == nodes[0].node.get_our_node_id() {
5375                                         // If announce_latest, expect 0th, 1st, 4th, 8th, 10th HTLCs, else only 0th, 1st, 10th below-dust HTLCs
5376                                         for htlc in &updates.update_fail_htlcs {
5377                                                 assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 6 || if announce_latest { htlc.htlc_id == 3 || htlc.htlc_id == 5 } else { false });
5378                                         }
5379                                         assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 5 } else { 3 });
5380                                         assert!(!a_done);
5381                                         a_done = true;
5382                                         &nodes[0]
5383                                 } else {
5384                                         // If announce_latest, expect 2nd, 3rd, 7th, 9th HTLCs, else only 2nd, 3rd, 9th below-dust HTLCs
5385                                         for htlc in &updates.update_fail_htlcs {
5386                                                 assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 5 || if announce_latest { htlc.htlc_id == 4 } else { false });
5387                                         }
5388                                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
5389                                         assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 4 } else { 3 });
5390                                         &nodes[1]
5391                                 };
5392                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
5393                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[1]);
5394                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[2]);
5395                                 if announce_latest {
5396                                         target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[3]);
5397                                         if *node_id == nodes[0].node.get_our_node_id() {
5398                                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[4]);
5399                                         }
5400                                 }
5401                                 commitment_signed_dance!(target, nodes[2], updates.commitment_signed, false, true);
5402                         },
5403                         _ => panic!("Unexpected event"),
5404                 }
5405         }
5406
5407         let as_events = nodes[0].node.get_and_clear_pending_events();
5408         assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 });
5409         let mut as_faileds = new_hash_set();
5410         let mut as_updates = 0;
5411         for event in as_events.iter() {
5412                 if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
5413                         assert!(as_faileds.insert(*payment_hash));
5414                         if *payment_hash != payment_hash_2 {
5415                                 assert_eq!(*payment_failed_permanently, deliver_last_raa);
5416                         } else {
5417                                 assert!(!payment_failed_permanently);
5418                         }
5419                         if let PathFailure::OnPath { network_update: Some(_) } = failure {
5420                                 as_updates += 1;
5421                         }
5422                 } else if let &Event::PaymentFailed { .. } = event {
5423                 } else { panic!("Unexpected event"); }
5424         }
5425         assert!(as_faileds.contains(&payment_hash_1));
5426         assert!(as_faileds.contains(&payment_hash_2));
5427         if announce_latest {
5428                 assert!(as_faileds.contains(&payment_hash_3));
5429                 assert!(as_faileds.contains(&payment_hash_5));
5430         }
5431         assert!(as_faileds.contains(&payment_hash_6));
5432
5433         let bs_events = nodes[1].node.get_and_clear_pending_events();
5434         assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 });
5435         let mut bs_faileds = new_hash_set();
5436         let mut bs_updates = 0;
5437         for event in bs_events.iter() {
5438                 if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
5439                         assert!(bs_faileds.insert(*payment_hash));
5440                         if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
5441                                 assert_eq!(*payment_failed_permanently, deliver_last_raa);
5442                         } else {
5443                                 assert!(!payment_failed_permanently);
5444                         }
5445                         if let PathFailure::OnPath { network_update: Some(_) } = failure {
5446                                 bs_updates += 1;
5447                         }
5448                 } else if let &Event::PaymentFailed { .. } = event {
5449                 } else { panic!("Unexpected event"); }
5450         }
5451         assert!(bs_faileds.contains(&payment_hash_1));
5452         assert!(bs_faileds.contains(&payment_hash_2));
5453         if announce_latest {
5454                 assert!(bs_faileds.contains(&payment_hash_4));
5455         }
5456         assert!(bs_faileds.contains(&payment_hash_5));
5457
5458         // For each HTLC which was not failed back via the normal update_fail path (ie when
5459         // deliver_last_raa is set), we should get a NetworkUpdate. A should have gotten 4 HTLCs
5460         // failed back due to unknown-preimage-etc and B should have gotten 2. Thus, in the
5461         // announce_latest && deliver_last_raa case, we should have 5-4=1 and 4-2=2 NetworkUpdates.
5462         assert_eq!(as_updates, if deliver_last_raa { 1 } else if !announce_latest { 3 } else { 5 });
5463         assert_eq!(bs_updates, if deliver_last_raa { 2 } else if !announce_latest { 3 } else { 4 });
5464 }
5465
5466 #[test]
5467 fn test_fail_backwards_latest_remote_announce_a() {
5468         do_test_fail_backwards_unrevoked_remote_announce(false, true);
5469 }
5470
5471 #[test]
5472 fn test_fail_backwards_latest_remote_announce_b() {
5473         do_test_fail_backwards_unrevoked_remote_announce(true, true);
5474 }
5475
5476 #[test]
5477 fn test_fail_backwards_previous_remote_announce() {
5478         do_test_fail_backwards_unrevoked_remote_announce(false, false);
5479         // Note that true, true doesn't make sense as it implies we announce a revoked state, which is
5480         // tested for in test_commitment_revoked_fail_backward_exhaustive()
5481 }
5482
5483 #[test]
5484 fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
5485         let chanmon_cfgs = create_chanmon_cfgs(2);
5486         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5487         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5488         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5489
5490         // Create some initial channels
5491         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5492
5493         let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
5494         let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
5495         assert_eq!(local_txn[0].input.len(), 1);
5496         check_spends!(local_txn[0], chan_1.3);
5497
5498         // Time out the HTLC on A's chain so that it can generate an HTLC-Timeout tx
5499         mine_transaction(&nodes[0], &local_txn[0]);
5500         check_closed_broadcast!(nodes[0], true);
5501         check_added_monitors!(nodes[0], 1);
5502         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5503         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
5504
5505         let htlc_timeout = {
5506                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5507                 assert_eq!(node_txn.len(), 1);
5508                 assert_eq!(node_txn[0].input.len(), 1);
5509                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5510                 check_spends!(node_txn[0], local_txn[0]);
5511                 node_txn[0].clone()
5512         };
5513
5514         mine_transaction(&nodes[0], &htlc_timeout);
5515         connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
5516         expect_payment_failed!(nodes[0], our_payment_hash, false);
5517
5518         // Verify that A is able to spend its own HTLC-Timeout tx thanks to the SpendableOutputs event given back by its ChannelMonitor
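        // Three sweep txs are expected below: one spending the commitment tx's delayed to_self
        // output, one spending the HTLC-Timeout output once its own BREAKDOWN_TIMEOUT delay has
        // passed, and one aggregating both.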
5519         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5520         assert_eq!(spend_txn.len(), 3);
5521         check_spends!(spend_txn[0], local_txn[0]);
5522         assert_eq!(spend_txn[1].input.len(), 1);
5523         check_spends!(spend_txn[1], htlc_timeout);
5524         assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5525         assert_eq!(spend_txn[2].input.len(), 2);
5526         check_spends!(spend_txn[2], local_txn[0], htlc_timeout);
5527         assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
5528                 spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
5529 }
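
// Outside the test harness, outputs like those swept above reach users via
// Event::SpendableOutputs. A minimal sketch (not invoked by any test; `descriptors`,
// `destination_script` and `fee_rate_sat_per_1000_weight` are assumed inputs) of sweeping such
// outputs with an OutputSpender implementation like KeysManager:
#[allow(dead_code)]
fn sweep_spendable_outputs_sketch(
        spender: &crate::sign::KeysManager, descriptors: &[&crate::sign::SpendableOutputDescriptor],
        destination_script: ScriptBuf, fee_rate_sat_per_1000_weight: u32,
) -> Result<Transaction, ()> {
        let secp_ctx = Secp256k1::new();
        // No extra outputs and no explicit locktime; the spender signs for all descriptors at once.
        spender.spend_spendable_outputs(descriptors, Vec::new(), destination_script,
                fee_rate_sat_per_1000_weight, None, &secp_ctx)
}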
5530
5531 #[test]
5532 fn test_key_derivation_params() {
5533         // This test is a copy of test_dynamic_spendable_outputs_local_htlc_timeout_tx, with a key
5534         // manager rotation to test that `channel_keys_id` returned in
5535         // [`SpendableOutputDescriptor::DelayedPaymentOutput`] lets us re-derive the channel key set to
5536         // then derive a `delayed_payment_key`.
5537
5538         let chanmon_cfgs = create_chanmon_cfgs(3);
5539
5540         // We manually create the node configuration to back up the seed.
5541         let seed = [42; 32];
5542         let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
5543         let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
5544         let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger));
5545         let scorer = RwLock::new(test_utils::TestScorer::new());
5546         let router = test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[0].logger, &scorer);
5547         let message_router = test_utils::TestMessageRouter::new(network_graph.clone(), &keys_manager);
5548         let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, message_router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
5549         let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5550         node_cfgs.remove(0);
5551         node_cfgs.insert(0, node);
5552
5553         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5554         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5555
5556         // Create some initial channels
5557         // Create a dummy channel to advance the channel keys index by one, and thus test
5558         // re-derivation correctness for node 0
5559         let chan_0 = create_announced_chan_between_nodes(&nodes, 0, 2);
5560         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5561         assert_ne!(chan_0.3.output[0].script_pubkey, chan_1.3.output[0].script_pubkey);
5562
5563         // Ensure all nodes are at the same height
5564         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
5565         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
5566         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
5567         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
5568
5569         let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
5570         let local_txn_0 = get_local_commitment_txn!(nodes[0], chan_0.2);
5571         let local_txn_1 = get_local_commitment_txn!(nodes[0], chan_1.2);
5572         assert_eq!(local_txn_1[0].input.len(), 1);
5573         check_spends!(local_txn_1[0], chan_1.3);
5574
5575         // We check that the funding pubkeys are unique
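        // (Witness item 3 is the 2-of-2 funding witnessScript, OP_2 <33-byte pk> <33-byte pk> OP_2
        // OP_CHECKMULTISIG, so the pubkeys sit at byte ranges [2..35] and [36..69].)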
5576         let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][36..69]));
5577         let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][36..69]));
5578         if from_0_funding_key_0 == from_1_funding_key_0
5579             || from_0_funding_key_0 == from_1_funding_key_1
5580             || from_0_funding_key_1 == from_1_funding_key_0
5581             || from_0_funding_key_1 == from_1_funding_key_1 {
5582                 panic!("Funding pubkeys aren't unique");
5583         }
5584
5585         // Time out the HTLC on A's chain so that it can generate an HTLC-Timeout tx
5586         mine_transaction(&nodes[0], &local_txn_1[0]);
5587         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
5588         check_closed_broadcast!(nodes[0], true);
5589         check_added_monitors!(nodes[0], 1);
5590         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5591
5592         let htlc_timeout = {
5593                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5594                 assert_eq!(node_txn.len(), 1);
5595                 assert_eq!(node_txn[0].input.len(), 1);
5596                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5597                 check_spends!(node_txn[0], local_txn_1[0]);
5598                 node_txn[0].clone()
5599         };
5600
5601         mine_transaction(&nodes[0], &htlc_timeout);
5602         connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
5603         expect_payment_failed!(nodes[0], our_payment_hash, false);
5604
5605         // Verify that A is able to spend its own HTLC-Timeout tx thanks to the SpendableOutputs event given back by its ChannelMonitor
5606         let new_keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
5607         let spend_txn = check_spendable_outputs!(nodes[0], new_keys_manager);
5608         assert_eq!(spend_txn.len(), 3);
5609         check_spends!(spend_txn[0], local_txn_1[0]);
5610         assert_eq!(spend_txn[1].input.len(), 1);
5611         check_spends!(spend_txn[1], htlc_timeout);
5612         assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5613         assert_eq!(spend_txn[2].input.len(), 2);
5614         check_spends!(spend_txn[2], local_txn_1[0], htlc_timeout);
5615         assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
5616                 spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
5617 }
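
// A minimal sketch of the re-derivation exercised above: given only the original seed and the
// `channel_keys_id` reported in a spendable-output descriptor, a fresh signer provider can
// reconstruct the channel's key set. `channel_value_satoshis` and `channel_keys_id` are assumed
// inputs here; nothing below is invoked by the tests.
#[allow(dead_code)]
fn rederive_channel_signer_sketch(
        seed: &[u8; 32], channel_value_satoshis: u64, channel_keys_id: [u8; 32],
) -> TestChannelSigner {
        let keys_manager = test_utils::TestKeysInterface::new(seed, Network::Testnet);
        // SignerProvider::derive_channel_signer deterministically re-derives the channel's keys.
        keys_manager.derive_channel_signer(channel_value_satoshis, channel_keys_id)
}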
5618
5619 #[test]
5620 fn test_static_output_closing_tx() {
5621         let chanmon_cfgs = create_chanmon_cfgs(2);
5622         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5623         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5624         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5625
5626         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5627
5628         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
5629         let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
5630
5631         mine_transaction(&nodes[0], &closing_tx);
5632         check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
5633         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
5634
5635         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5636         assert_eq!(spend_txn.len(), 1);
5637         check_spends!(spend_txn[0], closing_tx);
5638
5639         mine_transaction(&nodes[1], &closing_tx);
5640         check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
5641         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5642
5643         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5644         assert_eq!(spend_txn.len(), 1);
5645         check_spends!(spend_txn[0], closing_tx);
5646 }
5647
5648 fn do_htlc_claim_local_commitment_only(use_dust: bool) {
5649         let chanmon_cfgs = create_chanmon_cfgs(2);
5650         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5651         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5652         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5653         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5654
5655         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 });
5656
5657         // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being
5658         // present in B's local commitment transaction, but none of A's commitment transactions.
5659         nodes[1].node.claim_funds(payment_preimage);
5660         check_added_monitors!(nodes[1], 1);
5661         expect_payment_claimed!(nodes[1], payment_hash, if use_dust { 50000 } else { 3_000_000 });
5662
5663         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5664         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
5665         expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
5666
5667         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
5668         check_added_monitors!(nodes[0], 1);
5669         let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5670         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
5671         check_added_monitors!(nodes[1], 1);
5672
5673         let starting_block = nodes[1].best_block_info();
5674         let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
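        // Connect blocks until the HTLC's CLTV expiry is within CLTV_CLAIM_BUFFER, at which point
        // B's ChannelMonitor goes on-chain, broadcasting an HTLC-Success claim in the non-dust case.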
5675         for _ in starting_block.1 + 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + starting_block.1 + 2 {
5676                 connect_block(&nodes[1], &block);
5677                 block.header.prev_blockhash = block.block_hash();
5678         }
5679         test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
5680         check_closed_broadcast!(nodes[1], true);
5681         check_added_monitors!(nodes[1], 1);
5682         check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 100000);
5683 }
5684
5685 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
5686         let chanmon_cfgs = create_chanmon_cfgs(2);
5687         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5688         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5689         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5690         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5691
5692         let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if use_dust { 50000 } else { 3000000 });
5693         nodes[0].node.send_payment_with_route(&route, payment_hash,
5694                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
5695         check_added_monitors!(nodes[0], 1);
5696
5697         let _as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5698
5699         // As far as A is concerned, the HTLC is now present only in the latest remote commitment
5700         // transaction; however, it is not in A's latest local commitment, so A can simply broadcast
5701         // that to "time out" the HTLC.
5702
5703         let starting_block = nodes[1].best_block_info();
5704         let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
5705
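        // Connect blocks until the HTLC has expired plus LATENCY_GRACE_PERIOD_BLOCKS, at which point
        // A must go on-chain to time the HTLC out, even though it appears in none of A's own
        // commitment transactions.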
5706         for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 {
5707                 connect_block(&nodes[0], &block);
5708                 block.header.prev_blockhash = block.block_hash();
5709         }
5710         test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
5711         check_closed_broadcast!(nodes[0], true);
5712         check_added_monitors!(nodes[0], 1);
5713         check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
5714 }
5715
5716 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
5717         let chanmon_cfgs = create_chanmon_cfgs(3);
5718         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5719         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5720         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5721         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5722
5723         // Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present
5724         // in B's previous (unrevoked) commitment transaction, and in none of A's commitment transactions.
5725         // Also optionally test that we *don't* fail the channel in case the commitment transaction was
5726         // actually revoked.
5727         let htlc_value = if use_dust { 50000 } else { 3000000 };
5728         let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
5729         nodes[1].node.fail_htlc_backwards(&our_payment_hash);
5730         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
5731         check_added_monitors!(nodes[1], 1);
5732
5733         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5734         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
5735         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
5736         check_added_monitors!(nodes[0], 1);
5737         let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5738         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
5739         check_added_monitors!(nodes[1], 1);
5740         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.1);
5741         check_added_monitors!(nodes[1], 1);
5742         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
5743
5744         if check_revoke_no_close {
5745                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
5746                 check_added_monitors!(nodes[0], 1);
5747         }
5748
5749         let starting_block = nodes[1].best_block_info();
5750         let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
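        // As above, connect blocks well past the HTLC's expiry. Whether A must go on-chain depends
        // on whether it saw B revoke the commitment containing the HTLC (check_revoke_no_close).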
5751         for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 {
5752                 connect_block(&nodes[0], &block);
5753                 block.header.prev_blockhash = block.block_hash();
5754         }
5755         if !check_revoke_no_close {
5756                 test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
5757                 check_closed_broadcast!(nodes[0], true);
5758                 check_added_monitors!(nodes[0], 1);
5759                 check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
5760         } else {
5761                 expect_payment_failed!(nodes[0], our_payment_hash, true);
5762         }
5763 }
5764
5765 // Test that we close channels on-chain when broadcastable HTLCs reach their timeout window.
5766 // There are only a few cases to test here:
5767 //  * it's not really normative behavior, but we test that below-dust HTLCs "included" in
5768 //    broadcastable commitment transactions result in channel closure,
5769 //  * it's included in an unrevoked-but-previous remote commitment transaction,
5770 //  * it's included in the latest remote or local commitment transactions.
5771 // We test each of the three possible commitment transactions individually and use both dust and
5772 // non-dust HTLCs.
5773 // Note that we don't bother testing both outbound and inbound HTLC failures for each case, and we
5774 // assume they are handled the same across all six cases, as both outbound and inbound failures are
5775 // tested for at least one of the cases in other tests.
5776 #[test]
5777 fn htlc_claim_single_commitment_only_a() {
5778         do_htlc_claim_local_commitment_only(true);
5779         do_htlc_claim_local_commitment_only(false);
5780
5781         do_htlc_claim_current_remote_commitment_only(true);
5782         do_htlc_claim_current_remote_commitment_only(false);
5783 }
5784
5785 #[test]
5786 fn htlc_claim_single_commitment_only_b() {
5787         do_htlc_claim_previous_remote_commitment_only(true, false);
5788         do_htlc_claim_previous_remote_commitment_only(false, false);
5789         do_htlc_claim_previous_remote_commitment_only(true, true);
5790         do_htlc_claim_previous_remote_commitment_only(false, true);
5791 }
5792
5793 #[test]
5794 #[should_panic]
5795 fn bolt2_open_channel_sending_node_checks_part1() { // This test needs to be on its own as we are catching a panic
5796         let chanmon_cfgs = create_chanmon_cfgs(2);
5797         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5798         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5799         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5800         // Force duplicate randomness for every get-random call
5801         for node in nodes.iter() {
5802                 *node.keys_manager.override_random_bytes.lock().unwrap() = Some([0; 32]);
5803         }
5804
5805         // BOLT #2 spec: Sending node must ensure temporary_channel_id is unique from any other channel ID with the same peer.
5806         let channel_value_satoshis=10000;
5807         let push_msat=10001;
5808         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
5809         let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5810         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
5811         get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
5812
5813         // Create a second channel with the same random values. This used to panic due to a colliding
5814         // channel_id, but now panics due to a colliding outbound SCID alias.
5815         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
5816 }
5817
5818 #[test]
5819 fn bolt2_open_channel_sending_node_checks_part2() {
5820         let chanmon_cfgs = create_chanmon_cfgs(2);
5821         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5822         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5823         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5824
5825         // BOLT #2 spec: Sending node must set funding_satoshis to less than 2^24 satoshis
5826         let channel_value_satoshis=2^24; // NB: `^` is XOR in Rust (so this is 26 sats); create_channel must reject it either way
5827         let push_msat=10001;
5828         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
5829
5830         // BOLT #2 spec: Sending node must set push_msat to equal or less than 1000 * funding_satoshis
5831         let channel_value_satoshis=10000;
5832         // Test when push_msat is one more than 1000 * funding_satoshis.
5833         let push_msat=1000*channel_value_satoshis+1;
5834         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
5835
5836         // BOLT #2 spec: Sending node must set channel_reserve_satoshis greater than or equal to dust_limit_satoshis
5837         let channel_value_satoshis=10000;
5838         let push_msat=10001;
5839         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_ok()); //Create a valid channel
5840         let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5841         assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.common_fields.dust_limit_satoshis);
5842
5843         // BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0
5844         // Only the least-significant bit of channel_flags is currently defined, resulting in channel_flags having one of only two possible states: 0 or 1
5845         assert!(node0_to_1_send_open_channel.common_fields.channel_flags<=1);
5846
5847         // BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver.
5848         assert!(BREAKDOWN_TIMEOUT>0);
5849         assert!(node0_to_1_send_open_channel.common_fields.to_self_delay==BREAKDOWN_TIMEOUT);
5850
5851         // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within.
5852         let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
5853         assert_eq!(node0_to_1_send_open_channel.common_fields.chain_hash, chain_hash);
5854
5855         // BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys.
5856         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.funding_pubkey.serialize()).is_ok());
5857         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.revocation_basepoint.serialize()).is_ok());
5858         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.htlc_basepoint.serialize()).is_ok());
5859         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.payment_basepoint.serialize()).is_ok());
5860         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.delayed_payment_basepoint.serialize()).is_ok());
5861 }
5862
5863 #[test]
5864 fn bolt2_open_channel_sane_dust_limit() {
5865         let chanmon_cfgs = create_chanmon_cfgs(2);
5866         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5867         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5868         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5869
5870         let channel_value_satoshis=1000000;
5871         let push_msat=10001;
5872         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
5873         let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
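        // 547 sats is one above the implementation's 546 sat limit referenced in the error below,
        // so nodes[1] must reject the open_channel.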
5874         node0_to_1_send_open_channel.common_fields.dust_limit_satoshis = 547;
5875         node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;
5876
5877         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
5878         let events = nodes[1].node.get_and_clear_pending_msg_events();
5879         let err_msg = match events[0] {
5880                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
5881                         msg.clone()
5882                 },
5883                 _ => panic!("Unexpected event"),
5884         };
5885         assert_eq!(err_msg.data, "dust_limit_satoshis (547) is greater than the implementation limit (546)");
5886 }
5887
5888 // Test that if we fail to send an HTLC that is being freed from the holding cell, and the HTLC
5889 // originated from our node, its failure is surfaced to the user. We trigger this failure to
5890 // free the HTLC by increasing our fee while the HTLC is in the holding cell such that the HTLC
5891 // is no longer affordable once it's freed.
5892 #[test]
5893 fn test_fail_holding_cell_htlc_upon_free() {
5894         let chanmon_cfgs = create_chanmon_cfgs(2);
5895         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5896         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5897         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5898         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
5899
5900         // First nodes[0] generates an update_fee, setting the channel's
5901         // pending_update_fee.
5902         {
5903                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
5904                 *feerate_lock += 20;
5905         }
5906         nodes[0].node.timer_tick_occurred();
5907         check_added_monitors!(nodes[0], 1);
5908
5909         let events = nodes[0].node.get_and_clear_pending_msg_events();
5910         assert_eq!(events.len(), 1);
5911         let (update_msg, commitment_signed) = match events[0] {
5912                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
5913                         (update_fee.as_ref(), commitment_signed)
5914                 },
5915                 _ => panic!("Unexpected event"),
5916         };
5917
5918         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
5919
5920         let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5921         let channel_reserve = chan_stat.channel_reserve_msat;
5922         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
5923         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
5924
5925         // The 2* and the +1 HTLC on the commit tx fee calculation account for the fee spike buffer (double the fee, one extra HTLC).
5926         let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
5927         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
5928
5929         // Send a payment which passes reserve checks but gets stuck in the holding cell.
5930         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
5931                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
5932         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5933         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
5934
5935         // Flush the pending fee update.
5936         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
5937         let (as_revoke_and_ack, _) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5938         check_added_monitors!(nodes[1], 1);
5939         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
5940         check_added_monitors!(nodes[0], 1);
5941
5942         // Upon receipt of the RAA, there will be an attempt to resend the holding cell
5943         // HTLC, but now that the fee has been raised the payment will now fail, causing
5944         // us to surface its failure to the user.
5945         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5946         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
5947         nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1);
5948
5949         // Check that the payment failed to be sent out.
5950         let events = nodes[0].node.get_and_clear_pending_events();
5951         assert_eq!(events.len(), 2);
5952         match &events[0] {
5953                 &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
5954                         assert_eq!(PaymentId(our_payment_hash.0), *payment_id.as_ref().unwrap());
5955                         assert_eq!(our_payment_hash.clone(), *payment_hash);
5956                         assert_eq!(*payment_failed_permanently, false);
5957                         assert_eq!(*short_channel_id, Some(route.paths[0].hops[0].short_channel_id));
5958                 },
5959                 _ => panic!("Unexpected event"),
5960         }
5961         match &events[1] {
5962                 &Event::PaymentFailed { ref payment_hash, .. } => {
5963                         assert_eq!(our_payment_hash.clone(), *payment_hash);
5964                 },
5965                 _ => panic!("Unexpected event"),
5966         }
5967 }
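
// A minimal sketch of the fee-spike-buffer arithmetic used to compute `max_can_send` above: the
// sender must be able to pay the commitment fee at twice the current feerate with one extra
// buffer HTLC beyond the one being added, hence the `2 *` and the `1 + 1`. This simply wraps the
// test-utility `commit_tx_fee_msat`; nothing here is invoked by the tests.
#[allow(dead_code)]
fn fee_spike_buffer_fee_msat_sketch(feerate: u32, channel_type: &ChannelTypeFeatures) -> u64 {
        // commit_tx_fee_msat charges for the commitment base weight plus the given number of HTLC
        // outputs at `feerate`; doubling leaves headroom for a feerate spike.
        2 * commit_tx_fee_msat(feerate, 1 + 1, channel_type)
}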
5968
5969 // Test that if multiple HTLCs are released from the holding cell and one is
5970 // valid but the other is no longer valid upon release, the valid HTLC can be
5971 // successfully completed while the other one fails as expected.
5972 #[test]
5973 fn test_free_and_fail_holding_cell_htlcs() {
5974         let chanmon_cfgs = create_chanmon_cfgs(2);
5975         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5976         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5977         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5978         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
5979
5980         // First nodes[0] generates an update_fee, setting the channel's
5981         // pending_update_fee.
5982         {
5983                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
5984                 *feerate_lock += 200;
5985         }
5986         nodes[0].node.timer_tick_occurred();
5987         check_added_monitors!(nodes[0], 1);
5988
5989         let events = nodes[0].node.get_and_clear_pending_msg_events();
5990         assert_eq!(events.len(), 1);
5991         let (update_msg, commitment_signed) = match events[0] {
5992                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
5993                         (update_fee.as_ref(), commitment_signed)
5994                 },
5995                 _ => panic!("Unexpected event"),
5996         };
5997
5998         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
5999
6000         let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6001         let channel_reserve = chan_stat.channel_reserve_msat;
6002         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
6003         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
6004
6005         // The 2* and the +1 HTLC on the commit tx fee calculation account for the fee spike buffer (double the fee, one extra HTLC).
6006         let amt_1 = 20000;
6007         let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features) - amt_1;
6008         let (route_1, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_1);
6009         let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_2);
6010
6011         // Send 2 payments which pass reserve checks but get stuck in the holding cell.
6012         nodes[0].node.send_payment_with_route(&route_1, payment_hash_1,
6013                 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
6014         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6015         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1);
6016         let payment_id_2 = PaymentId(nodes[0].keys_manager.get_secure_random_bytes());
6017         nodes[0].node.send_payment_with_route(&route_2, payment_hash_2,
6018                 RecipientOnionFields::secret_only(payment_secret_2), payment_id_2).unwrap();
6019         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6020         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2);
6021
6022         // Flush the pending fee update.
6023         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
6024         let (revoke_and_ack, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6025         check_added_monitors!(nodes[1], 1);
6026         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_and_ack);
6027         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
6028         check_added_monitors!(nodes[0], 2);
6029
6030         // Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs,
6031         // but now that the fee has been raised the second payment will now fail, causing us
6032         // to surface its failure to the user. The first payment should succeed.
6033         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6034         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
6035         nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1);
6036
6037         // Check that the second payment failed to be sent out.
6038         let events = nodes[0].node.get_and_clear_pending_events();
6039         assert_eq!(events.len(), 2);
6040         match &events[0] {
6041                 &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
6042                         assert_eq!(payment_id_2, *payment_id.as_ref().unwrap());
6043                         assert_eq!(payment_hash_2.clone(), *payment_hash);
6044                         assert_eq!(*payment_failed_permanently, false);
6045                         assert_eq!(*short_channel_id, Some(route_2.paths[0].hops[0].short_channel_id));
6046                 },
6047                 _ => panic!("Unexpected event"),
6048         }
6049         match &events[1] {
6050                 &Event::PaymentFailed { ref payment_hash, .. } => {
6051                         assert_eq!(payment_hash_2.clone(), *payment_hash);
6052                 },
6053                 _ => panic!("Unexpected event"),
6054         }
6055
6056         // Complete the first payment and the RAA from the fee update.
6057         let (payment_event, send_raa_event) = {
6058                 let mut msgs = nodes[0].node.get_and_clear_pending_msg_events();
6059                 assert_eq!(msgs.len(), 2);
6060                 (SendEvent::from_event(msgs.remove(0)), msgs.remove(0))
6061         };
6062         let raa = match send_raa_event {
6063                 MessageSendEvent::SendRevokeAndACK { msg, .. } => msg,
6064                 _ => panic!("Unexpected event"),
6065         };
6066         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
6067         check_added_monitors!(nodes[1], 1);
6068         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6069         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6070         let events = nodes[1].node.get_and_clear_pending_events();
6071         assert_eq!(events.len(), 1);
6072         match events[0] {
6073                 Event::PendingHTLCsForwardable { .. } => {},
6074                 _ => panic!("Unexpected event"),
6075         }
6076         nodes[1].node.process_pending_htlc_forwards();
6077         let events = nodes[1].node.get_and_clear_pending_events();
6078         assert_eq!(events.len(), 1);
6079         match events[0] {
6080                 Event::PaymentClaimable { .. } => {},
6081                 _ => panic!("Unexpected event"),
6082         }
6083         nodes[1].node.claim_funds(payment_preimage_1);
6084         check_added_monitors!(nodes[1], 1);
6085         expect_payment_claimed!(nodes[1], payment_hash_1, amt_1);
6086
6087         let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6088         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]);
6089         commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true);
6090         expect_payment_sent!(nodes[0], payment_preimage_1);
6091 }
6092
6093 // Test that if we fail to forward an HTLC that is being freed from the holding cell, the
6094 // HTLC is failed backwards. We trigger this failure to forward the freed HTLC by increasing
6095 // our fee while the HTLC is in the holding cell such that the HTLC is no longer affordable
6096 // once it's freed.
6097 #[test]
6098 fn test_fail_holding_cell_htlc_upon_free_multihop() {
6099         let chanmon_cfgs = create_chanmon_cfgs(3);
6100         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6101         // Avoid having to include routing fees in calculations
6102         let mut config = test_default_channel_config();
6103         config.channel_config.forwarding_fee_base_msat = 0;
6104         config.channel_config.forwarding_fee_proportional_millionths = 0;
6105         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
6106         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6107         let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6108         let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);
6109
6110         // First nodes[1] generates an update_fee, setting the channel's
6111         // pending_update_fee.
6112         {
6113                 let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
6114                 *feerate_lock += 20;
6115         }
6116         nodes[1].node.timer_tick_occurred();
6117         check_added_monitors!(nodes[1], 1);
6118
6119         let events = nodes[1].node.get_and_clear_pending_msg_events();
6120         assert_eq!(events.len(), 1);
6121         let (update_msg, commitment_signed) = match events[0] {
6122                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
6123                         (update_fee.as_ref(), commitment_signed)
6124                 },
6125                 _ => panic!("Unexpected event"),
6126         };
6127
6128         nodes[2].node.handle_update_fee(&nodes[1].node.get_our_node_id(), update_msg.unwrap());
6129
6130         let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan_0_1.2);
6131         let channel_reserve = chan_stat.channel_reserve_msat;
6132         let feerate = get_feerate!(nodes[0], nodes[1], chan_0_1.2);
6133         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_0_1.2);
6134
6135         // Send a payment which passes reserve checks but gets stuck in the holding cell.
6136         let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
6137         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send);
6138         let payment_event = {
6139                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6140                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6141                 check_added_monitors!(nodes[0], 1);
6142
6143                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6144                 assert_eq!(events.len(), 1);
6145
6146                 SendEvent::from_event(events.remove(0))
6147         };
6148         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6149         check_added_monitors!(nodes[1], 0);
6150         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6151         expect_pending_htlcs_forwardable!(nodes[1]);
6152
6153         chan_stat = get_channel_value_stat!(nodes[1], nodes[2], chan_1_2.2);
6154         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
6155
6156         // Flush the pending fee update.
6157         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
6158         let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
6159         check_added_monitors!(nodes[2], 1);
6160         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &raa);
6161         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &commitment_signed);
6162         check_added_monitors!(nodes[1], 2);
6163
6164         // A final RAA message is generated to finalize the fee update.
6165         let events = nodes[1].node.get_and_clear_pending_msg_events();
6166         assert_eq!(events.len(), 1);
6167
6168         let raa_msg = match &events[0] {
6169                 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => {
6170                         msg.clone()
6171                 },
6172                 _ => panic!("Unexpected event"),
6173         };
6174
6175         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_msg);
6176         check_added_monitors!(nodes[2], 1);
6177         assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
6178
6179         // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
6180         let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
6181         assert_eq!(process_htlc_forwards_event.len(), 2);
6182         match &process_htlc_forwards_event[1] {
6183                 &Event::PendingHTLCsForwardable { .. } => {},
6184                 _ => panic!("Unexpected event"),
6185         }
6186
6187         // In response, we call ChannelManager's process_pending_htlc_forwards
6188         nodes[1].node.process_pending_htlc_forwards();
6189         check_added_monitors!(nodes[1], 1);
6190
6191         // This causes the HTLC to be failed backwards.
6192         let fail_event = nodes[1].node.get_and_clear_pending_msg_events();
6193         assert_eq!(fail_event.len(), 1);
6194         let (fail_msg, commitment_signed) = match &fail_event[0] {
6195                 &MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
6196                         assert_eq!(updates.update_add_htlcs.len(), 0);
6197                         assert_eq!(updates.update_fulfill_htlcs.len(), 0);
6198                         assert_eq!(updates.update_fail_malformed_htlcs.len(), 0);
6199                         assert_eq!(updates.update_fail_htlcs.len(), 1);
6200                         (updates.update_fail_htlcs[0].clone(), updates.commitment_signed.clone())
6201                 },
6202                 _ => panic!("Unexpected event"),
6203         };
6204
6205         // Pass the failure messages back to nodes[0].
6206         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
6207         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
6208
6209         // Complete the HTLC failure+removal process.
6210         let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6211         check_added_monitors!(nodes[0], 1);
6212         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
6213         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
6214         check_added_monitors!(nodes[1], 2);
6215         let final_raa_event = nodes[1].node.get_and_clear_pending_msg_events();
6216         assert_eq!(final_raa_event.len(), 1);
6217         let raa = match &final_raa_event[0] {
6218                 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => msg.clone(),
6219                 _ => panic!("Unexpected event"),
6220         };
6221         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
6222         expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_1_2.0.contents.short_channel_id, false);
6223         check_added_monitors!(nodes[0], 1);
6224 }
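
// For reference, the message flow exercised above (and wrapped by the
// commitment_signed_dance! macro elsewhere) is, absent monitor-update failures:
//   sender    -> update_* message(s) + commitment_signed
//   recipient -> revoke_and_ack + commitment_signed
//   sender    -> revoke_and_ack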
6225
6226 #[test]
6227 fn test_payment_route_reaching_same_channel_twice() {
6228         // A route should not go through the same channel twice.
6229         // This is enforced when constructing a route.
6230         let chanmon_cfgs = create_chanmon_cfgs(2);
6231         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6232         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6233         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6234         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6235
6236         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0)
6237                 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
6238         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000);
6239
6240         // Extend the path with itself, simulating a route that goes through the same channel twice.
6241         let cloned_hops = route.paths[0].hops.clone();
6242         route.paths[0].hops.extend_from_slice(&cloned_hops);
6243
6244         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6245                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6246         ), false, APIError::InvalidRoute { ref err },
6247         assert_eq!(err, &"Path went through the same channel twice"));
6248 }
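
// A minimal sketch (illustrative only, not LDK's actual router or channel code) of the
// duplicate-channel check exercised above: walk a path's short channel ids and flag
// any repeat.
#[allow(dead_code)]
fn path_reuses_channel(short_channel_ids: &[u64]) -> bool {
        let mut seen = std::collections::HashSet::new();
        // `insert` returns false when the value was already present.
        short_channel_ids.iter().any(|scid| !seen.insert(*scid))
}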
6249
6250 // BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message.
6251 // BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve.
6252 //TODO: We don't believe this is explicitly enforced when sending an HTLC, but as the fee aspects of the BOLT specs are in flux, we leave this as a TODO.
6253
6254 #[test]
6255 fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
6256         //BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these)
6257         let chanmon_cfgs = create_chanmon_cfgs(2);
6258         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6259         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6260         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6261         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6262
6263         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6264         route.paths[0].hops[0].fee_msat = 100;
6265
6266         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6267                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6268                 ), true, APIError::ChannelUnavailable { .. }, {});
6269         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6270 }
6271
6272 #[test]
6273 fn test_update_add_htlc_bolt2_sender_zero_value_msat() {
6274         //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6275         let chanmon_cfgs = create_chanmon_cfgs(2);
6276         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6277         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6278         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6279         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6280
6281         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6282         route.paths[0].hops[0].fee_msat = 0;
6283         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6284                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)),
6285                 true, APIError::ChannelUnavailable { ref err },
6286                 assert_eq!(err, "Cannot send 0-msat HTLC"));
6287
6288         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6289         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 1);
6290 }
6291
6292 #[test]
6293 fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
6294         //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6295         let chanmon_cfgs = create_chanmon_cfgs(2);
6296         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6297         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6298         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6299         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6300
6301         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6302         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6303                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6304         check_added_monitors!(nodes[0], 1);
6305         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6306         updates.update_add_htlcs[0].amount_msat = 0;
6307
6308         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6309         nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC", 3);
6310         check_closed_broadcast!(nodes[1], true).unwrap();
6311         check_added_monitors!(nodes[1], 1);
6312         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() },
6313                 [nodes[0].node.get_our_node_id()], 100000);
6314 }
6315
6316 #[test]
6317 fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
6318         //BOLT 2 Requirement: MUST set cltv_expiry less than 500000000.
6319         //It is enforced when constructing a route.
6320         let chanmon_cfgs = create_chanmon_cfgs(2);
6321         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6322         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6323         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6324         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6325
6326         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0)
6327                 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
6328         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000);
6329         route.paths[0].hops.last_mut().unwrap().cltv_expiry_delta = 500000001;
6330         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6331                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6332                 ), true, APIError::InvalidRoute { ref err },
6333                 assert_eq!(err, &"Channel CLTV overflowed?"));
6334 }
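
// A sketch of the bound exercised above (per BOLT 2): cltv_expiry values of
// 500_000_000 or more fall into Bitcoin's time-based locktime range, so they cannot
// be valid block heights and must be rejected.
#[allow(dead_code)]
fn cltv_expiry_is_valid_block_height(cltv_expiry: u32) -> bool {
        cltv_expiry < 500_000_000
}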
6335
6336 #[test]
6337 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() {
6338         //BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC.
6339         //BOLT 2 Requirement: for the first HTLC it offers MUST set id to 0.
6340         //BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer.
6341         let chanmon_cfgs = create_chanmon_cfgs(2);
6342         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6343         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6344         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6345         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6346         let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
6347                 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().counterparty_max_accepted_htlcs as u64;
6348
6349         // Fetch a route in advance, as we won't be able to fetch one once we're unable to send.
6350         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6351         for i in 0..max_accepted_htlcs {
6352                 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6353                 let payment_event = {
6354                         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6355                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6356                         check_added_monitors!(nodes[0], 1);
6357
6358                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6359                         assert_eq!(events.len(), 1);
6360                         if let MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. }, } = events[0] {
6361                                 assert_eq!(htlcs[0].htlc_id, i);
6362                         } else {
6363                                 panic!("Unexpected event");
6364                         }
6365                         SendEvent::from_event(events.remove(0))
6366                 };
6367                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6368                 check_added_monitors!(nodes[1], 0);
6369                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6370
6371                 expect_pending_htlcs_forwardable!(nodes[1]);
6372                 expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000);
6373         }
6374         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6375                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6376                 ), true, APIError::ChannelUnavailable { .. }, {});
6377
6378         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6379 }
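
// A minimal sketch (illustrative only) of the sender-side id rule asserted in the
// loop above: the first offered HTLC uses id 0 and each successive offer increments
// the id by one.
#[allow(dead_code)]
struct OfferedHtlcIdCounter(u64);
impl OfferedHtlcIdCounter {
        #[allow(dead_code)]
        fn next_id(&mut self) -> u64 {
                let id = self.0;
                self.0 += 1;
                id
        }
}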
6380
6381 #[test]
6382 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
6383         //BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC.
6384         let chanmon_cfgs = create_chanmon_cfgs(2);
6385         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6386         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6387         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6388         let channel_value = 100000;
6389         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0);
6390         let max_in_flight = get_channel_value_stat!(nodes[0], nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat;
6391
6392         send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight);
6393
6394         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_in_flight);
6395         // Manually create a route over our max in flight (which our router normally
6396         // automatically limits us to).
6397         route.paths[0].hops[0].fee_msat = max_in_flight + 1;
6398         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6399                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6400                 ), true, APIError::ChannelUnavailable { .. }, {});
6401         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6402
6403         send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
6404 }
6405
6406 // BOLT 2 Requirements for the Receiver when handling an update_add_htlc message.
6407 #[test]
6408 fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
6409         //BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel.
6410         let chanmon_cfgs = create_chanmon_cfgs(2);
6411         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6412         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6413         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6414         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6415         let htlc_minimum_msat: u64;
6416         {
6417                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
6418                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
6419                 let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
6420                 htlc_minimum_msat = channel.context().get_holder_htlc_minimum_msat();
6421         }
6422
6423         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
6424         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6425                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6426         check_added_monitors!(nodes[0], 1);
6427         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6428         updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1;
6429         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6430         assert!(nodes[1].node.list_channels().is_empty());
6431         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6432         assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6433         check_added_monitors!(nodes[1], 1);
6434         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6435 }
6436
6437 #[test]
6438 fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
6439         //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel
6440         let chanmon_cfgs = create_chanmon_cfgs(2);
6441         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6442         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6443         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6444         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6445
6446         let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6447         let channel_reserve = chan_stat.channel_reserve_msat;
6448         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
6449         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
6450         // The 2* and +1 are for the fee spike reserve.
6451         let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
6452
6453         let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound;
6454         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
6455         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6456                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6457         check_added_monitors!(nodes[0], 1);
6458         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6459
6460         // Even though channel-initiator senders are required to respect the fee_spike_reserve,
6461         // at this time channel-initiatee receivers are not required to enforce that senders
6462         // respect the fee_spike_reserve.
6463         updates.update_add_htlcs[0].amount_msat = max_can_send + commit_tx_fee_outbound + 1;
6464         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6465
6466         assert!(nodes[1].node.list_channels().is_empty());
6467         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6468         assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
6469         check_added_monitors!(nodes[1], 1);
6470         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6471 }
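
// A standalone sketch (simplified; not the commit_tx_fee_msat helper used above) of
// the commitment-fee arithmetic these reserve tests rely on: the commitment weight
// grows per pending HTLC, and the sender doubles the one-extra-HTLC fee as a
// fee-spike buffer.
#[allow(dead_code)]
fn sketch_commit_tx_fee_msat(feerate_per_kw: u64, base_weight: u64, weight_per_htlc: u64, num_htlcs: u64) -> u64 {
        // fee_sat = weight * feerate_per_kw / 1000, expressed here in msat
        (base_weight + num_htlcs * weight_per_htlc) * feerate_per_kw / 1000 * 1000
}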
6472
6473 #[test]
6474 fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
6475         //BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel
6476         //BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash.
6477         let chanmon_cfgs = create_chanmon_cfgs(2);
6478         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6479         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6480         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6481         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6482
6483         let send_amt = 3999999;
6484         let (mut route, our_payment_hash, _, our_payment_secret) =
6485                 get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
6486         route.paths[0].hops[0].fee_msat = send_amt;
6487         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
6488         let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
6489         let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap();
6490         let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
6491                 &route.paths[0], send_amt, RecipientOnionFields::secret_only(our_payment_secret), cur_height, &None).unwrap();
6492         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
6493
6494         let mut msg = msgs::UpdateAddHTLC {
6495                 channel_id: chan.2,
6496                 htlc_id: 0,
6497                 amount_msat: 1000,
6498                 payment_hash: our_payment_hash,
6499                 cltv_expiry: htlc_cltv,
6500                 onion_routing_packet: onion_packet.clone(),
6501                 skimmed_fee_msat: None,
6502                 blinding_point: None,
6503         };
6504
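        // Deliver HTLCs up to the receiver's max_accepted_htlcs limit (assumed to be 50
        // for this config), then deliver one more below to trigger the failure.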
6505         for i in 0..50 {
6506                 msg.htlc_id = i as u64;
6507                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6508         }
6509         msg.htlc_id = 50;
6510         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6511
6512         assert!(nodes[1].node.list_channels().is_empty());
6513         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6514         assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6515         check_added_monitors!(nodes[1], 1);
6516         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6517 }
6518
6519 #[test]
6520 fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
6521         //BOLT2 Requirement: a receiving node SHOULD fail the channel if a sender adds more than its max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction.
6522         let chanmon_cfgs = create_chanmon_cfgs(2);
6523         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6524         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6525         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6526         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6527
6528         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6529         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6530                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6531         check_added_monitors!(nodes[0], 1);
6532         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6533         updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat + 1;
6534         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6535
6536         assert!(nodes[1].node.list_channels().is_empty());
6537         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6538         assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
6539         check_added_monitors!(nodes[1], 1);
6540         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 1000000);
6541 }
6542
6543 #[test]
6544 fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
6545         //BOLT2 Requirement: if sending node sets cltv_expiry to greater or equal to 500000000: SHOULD fail the channel.
6546         let chanmon_cfgs = create_chanmon_cfgs(2);
6547         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6548         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6549         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6550
6551         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6552         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6553         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6554                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6555         check_added_monitors!(nodes[0], 1);
6556         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6557         updates.update_add_htlcs[0].cltv_expiry = 500000000;
6558         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6559
6560         assert!(nodes[1].node.list_channels().is_empty());
6561         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6562         assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height");
6563         check_added_monitors!(nodes[1], 1);
6564         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6565 }
6566
6567 #[test]
6568 fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
6569         //BOLT 2 requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection.
6570         // We test this by first checking that repeated HTLCs pass commitment signature checks
6571         // after a disconnect, and then that non-sequential htlc_ids result in a channel failure.
6572         let chanmon_cfgs = create_chanmon_cfgs(2);
6573         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6574         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6575         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6576
6577         create_announced_chan_between_nodes(&nodes, 0, 1);
6578         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6579         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6580                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6581         check_added_monitors!(nodes[0], 1);
6582         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6583         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6584
6585         //Disconnect and Reconnect
6586         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
6587         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
6588         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
6589                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
6590         }, true).unwrap();
6591         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
6592         assert_eq!(reestablish_1.len(), 1);
6593         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
6594                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
6595         }, false).unwrap();
6596         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
6597         assert_eq!(reestablish_2.len(), 1);
6598         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
6599         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
6600         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
6601         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
6602
6603         //Resend HTLC
6604         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6605         assert_eq!(updates.commitment_signed.htlc_signatures.len(), 1);
6606         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed);
6607         check_added_monitors!(nodes[1], 1);
6608         let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6609
6610         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6611
6612         assert!(nodes[1].node.list_channels().is_empty());
6613         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6614         assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
6615         check_added_monitors!(nodes[1], 1);
6616         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6617 }
6618
6619 #[test]
6620 fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
6621         //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions:     MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6622
6623         let chanmon_cfgs = create_chanmon_cfgs(2);
6624         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6625         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6626         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6627         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6628         let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6629         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6630                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6631
6632         check_added_monitors!(nodes[0], 1);
6633         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6634         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6635
6636         let update_msg = msgs::UpdateFulfillHTLC{
6637                 channel_id: chan.2,
6638                 htlc_id: 0,
6639                 payment_preimage: our_payment_preimage,
6640         };
6641
6642         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6643
6644         assert!(nodes[0].node.list_channels().is_empty());
6645         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6646         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6647         check_added_monitors!(nodes[0], 1);
6648         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6649 }
6650
6651 #[test]
6652 fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
6653         //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions:     MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6654
6655         let chanmon_cfgs = create_chanmon_cfgs(2);
6656         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6657         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6658         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6659         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6660
6661         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6662         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6663                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6664         check_added_monitors!(nodes[0], 1);
6665         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6666         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6667
6668         let update_msg = msgs::UpdateFailHTLC{
6669                 channel_id: chan.2,
6670                 htlc_id: 0,
6671                 reason: msgs::OnionErrorPacket { data: Vec::new()},
6672         };
6673
6674         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6675
6676         assert!(nodes[0].node.list_channels().is_empty());
6677         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6678         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6679         check_added_monitors!(nodes[0], 1);
6680         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6681 }
6682
6683 #[test]
6684 fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() {
6685         //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions:     MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6686
6687         let chanmon_cfgs = create_chanmon_cfgs(2);
6688         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6689         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6690         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6691         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6692
6693         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6694         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6695                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6696         check_added_monitors!(nodes[0], 1);
6697         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6698         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6699         let update_msg = msgs::UpdateFailMalformedHTLC{
6700                 channel_id: chan.2,
6701                 htlc_id: 0,
6702                 sha256_of_onion: [1; 32],
6703                 failure_code: 0x8000,
6704         };
6705
6706         nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6707
6708         assert!(nodes[0].node.list_channels().is_empty());
6709         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6710         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6711         check_added_monitors!(nodes[0], 1);
6712         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6713 }
6714
6715 #[test]
6716 fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
6717         //BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel.
6718
6719         let chanmon_cfgs = create_chanmon_cfgs(2);
6720         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6721         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6722         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6723         create_announced_chan_between_nodes(&nodes, 0, 1);
6724
6725         let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
6726
6727         nodes[1].node.claim_funds(our_payment_preimage);
6728         check_added_monitors!(nodes[1], 1);
6729         expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);
6730
6731         let events = nodes[1].node.get_and_clear_pending_msg_events();
6732         assert_eq!(events.len(), 1);
6733         let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
6734                 match events[0] {
6735                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6736                                 assert!(update_add_htlcs.is_empty());
6737                                 assert_eq!(update_fulfill_htlcs.len(), 1);
6738                                 assert!(update_fail_htlcs.is_empty());
6739                                 assert!(update_fail_malformed_htlcs.is_empty());
6740                                 assert!(update_fee.is_none());
6741                                 update_fulfill_htlcs[0].clone()
6742                         },
6743                         _ => panic!("Unexpected event"),
6744                 }
6745         };
6746
6747         update_fulfill_msg.htlc_id = 1;
6748
6749         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
6750
6751         assert!(nodes[0].node.list_channels().is_empty());
6752         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6753         assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
6754         check_added_monitors!(nodes[0], 1);
6755         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6756 }
6757
6758 #[test]
6759 fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
6760         //BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel.
6761
6762         let chanmon_cfgs = create_chanmon_cfgs(2);
6763         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6764         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6765         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6766         create_announced_chan_between_nodes(&nodes, 0, 1);
6767
6768         let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
6769
6770         nodes[1].node.claim_funds(our_payment_preimage);
6771         check_added_monitors!(nodes[1], 1);
6772         expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);
6773
6774         let events = nodes[1].node.get_and_clear_pending_msg_events();
6775         assert_eq!(events.len(), 1);
6776         let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
6777                 match events[0] {
6778                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6779                                 assert!(update_add_htlcs.is_empty());
6780                                 assert_eq!(update_fulfill_htlcs.len(), 1);
6781                                 assert!(update_fail_htlcs.is_empty());
6782                                 assert!(update_fail_malformed_htlcs.is_empty());
6783                                 assert!(update_fee.is_none());
6784                                 update_fulfill_htlcs[0].clone()
6785                         },
6786                         _ => panic!("Unexpected event"),
6787                 }
6788         };
6789
6790         update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]);
6791
6792         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
6793
6794         assert!(nodes[0].node.list_channels().is_empty());
6795         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6796         assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
6797         check_added_monitors!(nodes[0], 1);
6798         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6799 }
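
// A sketch of the check exercised above: a fulfill is only acceptable if its preimage
// SHA256-hashes to the HTLC's payment hash (types from bitcoin's hashes crate, used
// purely for illustration; assumes its to_byte_array accessor).
#[allow(dead_code)]
fn preimage_matches_hash(preimage: &PaymentPreimage, payment_hash: &PaymentHash) -> bool {
        use bitcoin::hashes::{sha256::Hash as Sha256, Hash};
        PaymentHash(Sha256::hash(&preimage.0).to_byte_array()) == *payment_hash
}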
6800
6801 #[test]
6802 fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() {
6803         //BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel.
6804
6805         let chanmon_cfgs = create_chanmon_cfgs(2);
6806         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6807         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6808         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6809         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6810
6811         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6812         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6813                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6814         check_added_monitors!(nodes[0], 1);
6815
6816         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6817         updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
6818
6819         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6820         check_added_monitors!(nodes[1], 0);
6821         commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true);
6822
6823         let events = nodes[1].node.get_and_clear_pending_msg_events();
6824
6825         let mut update_msg: msgs::UpdateFailMalformedHTLC = {
6826                 match events[0] {
6827                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6828                                 assert!(update_add_htlcs.is_empty());
6829                                 assert!(update_fulfill_htlcs.is_empty());
6830                                 assert!(update_fail_htlcs.is_empty());
6831                                 assert_eq!(update_fail_malformed_htlcs.len(), 1);
6832                                 assert!(update_fee.is_none());
6833                                 update_fail_malformed_htlcs[0].clone()
6834                         },
6835                         _ => panic!("Unexpected event"),
6836                 }
6837         };
6838         update_msg.failure_code &= !0x8000;
6839         nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6840
6841         assert!(nodes[0].node.list_channels().is_empty());
6842         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6843         assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
6844         check_added_monitors!(nodes[0], 1);
6845         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 1000000);
6846 }
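
// For reference, the failure_code flag bits these malformed-HTLC tests manipulate
// (per BOLT 4): BADONION = 0x8000, PERM = 0x4000, NODE = 0x2000, with the low bits
// carrying the specific failure type (e.g. invalid_onion_version = BADONION|PERM|4).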
6847
6848 #[test]
6849 fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_update_fail_htlc() {
6850         //BOLT 2 Requirement: a receiving node which has an outgoing HTLC canceled by update_fail_malformed_htlc:
6851         //    * MUST return an error in the update_fail_htlc sent to the link which originally sent the HTLC, using the failure_code given and setting the data to sha256_of_onion.
6852
6853         let chanmon_cfgs = create_chanmon_cfgs(3);
6854         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6855         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
6856         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6857         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6858         let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000);
6859
6860         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
6861
6862         // First hop
6863         let mut payment_event = {
6864                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6865                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6866                 check_added_monitors!(nodes[0], 1);
6867                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6868                 assert_eq!(events.len(), 1);
6869                 SendEvent::from_event(events.remove(0))
6870         };
6871         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6872         check_added_monitors!(nodes[1], 0);
6873         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6874         expect_pending_htlcs_forwardable!(nodes[1]);
6875         let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
6876         assert_eq!(events_2.len(), 1);
6877         check_added_monitors!(nodes[1], 1);
6878         payment_event = SendEvent::from_event(events_2.remove(0));
6879         assert_eq!(payment_event.msgs.len(), 1);
6880
6881         // Second hop
6882         payment_event.msgs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
6883         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
6884         check_added_monitors!(nodes[2], 0);
6885         commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
6886
6887         let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
6888         assert_eq!(events_3.len(), 1);
6889         let update_msg: (msgs::UpdateFailMalformedHTLC, msgs::CommitmentSigned) = {
6890                 match events_3[0] {
6891                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
6892                                 assert!(update_add_htlcs.is_empty());
6893                                 assert!(update_fulfill_htlcs.is_empty());
6894                                 assert!(update_fail_htlcs.is_empty());
6895                                 assert_eq!(update_fail_malformed_htlcs.len(), 1);
6896                                 assert!(update_fee.is_none());
6897                                 (update_fail_malformed_htlcs[0].clone(), commitment_signed.clone())
6898                         },
6899                         _ => panic!("Unexpected event"),
6900                 }
6901         };
6902
6903         nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg.0);
6904
6905         check_added_monitors!(nodes[1], 0);
6906         commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
6907         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
6908         let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
6909         assert_eq!(events_4.len(), 1);
6910
6911         // Confirm that handling the update_fail_malformed_htlc message produces an update_fail_htlc message to be forwarded back along the route.
6912         match events_4[0] {
6913                 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6914                         assert!(update_add_htlcs.is_empty());
6915                         assert!(update_fulfill_htlcs.is_empty());
6916                         assert_eq!(update_fail_htlcs.len(), 1);
6917                         assert!(update_fail_malformed_htlcs.is_empty());
6918                         assert!(update_fee.is_none());
6919                 },
6920                 _ => panic!("Unexpected event"),
6921         };
6922
6923         check_added_monitors!(nodes[1], 1);
6924 }
6925
6926 #[test]
6927 fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() {
6928         let chanmon_cfgs = create_chanmon_cfgs(3);
6929         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6930         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
6931         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6932         create_announced_chan_between_nodes(&nodes, 0, 1);
6933         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
6934
6935         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100_000);
6936
6937         // First hop
6938         let mut payment_event = {
6939                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6940                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6941                 check_added_monitors!(nodes[0], 1);
6942                 SendEvent::from_node(&nodes[0])
6943         };
6944
6945         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6946         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6947         expect_pending_htlcs_forwardable!(nodes[1]);
6948         check_added_monitors!(nodes[1], 1);
6949         payment_event = SendEvent::from_node(&nodes[1]);
6950         assert_eq!(payment_event.msgs.len(), 1);
6951
6952         // Second hop
6953         payment_event.msgs[0].onion_routing_packet.version = 1; // Trigger an invalid_onion_version error
6954         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
6955         check_added_monitors!(nodes[2], 0);
6956         commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
6957
6958         let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
6959         assert_eq!(events_3.len(), 1);
6960         match events_3[0] {
6961                 MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
6962                         let mut update_msg = updates.update_fail_malformed_htlcs[0].clone();
6963                         // Set the NODE bit (BADONION and PERM already set in invalid_onion_version error)
6964                         update_msg.failure_code |= 0x2000;
6965
6966                         nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg);
6967                         commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true);
6968                 },
6969                 _ => panic!("Unexpected event"),
6970         }
6971
6972         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
6973                 vec![HTLCDestination::NextHopChannel {
6974                         node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
6975         let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
6976         assert_eq!(events_4.len(), 1);
6977         check_added_monitors!(nodes[1], 1);
6978
6979         match events_4[0] {
6980                 MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
6981                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
6982                         commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
6983                 },
6984                 _ => panic!("Unexpected event"),
6985         }
6986
6987         let events_5 = nodes[0].node.get_and_clear_pending_events();
6988         assert_eq!(events_5.len(), 2);
6989
6990         // Expect a PaymentPathFailed event with a ChannelFailure network update for the channel
6991         // between the node originating the error and its next hop.
6992         match events_5[0] {
6993                 Event::PaymentPathFailed { error_code, failure: PathFailure::OnPath { network_update: Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent }) }, ..
6994                 } => {
6995                         assert_eq!(short_channel_id, chan_2.0.contents.short_channel_id);
6996                         assert!(is_permanent);
6997                         assert_eq!(error_code, Some(0x8000|0x4000|0x2000|4));
6998                 },
6999                 _ => panic!("Unexpected event"),
7000         }
7001         match events_5[1] {
7002                 Event::PaymentFailed { payment_hash, .. } => {
7003                         assert_eq!(payment_hash, our_payment_hash);
7004                 },
7005                 _ => panic!("Unexpected event"),
7006         }
7007
7008         // TODO: Test actual removal of channel from NetworkGraph when it's implemented.
7009 }
7010
7011 fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
7012         // Dust-HTLC failure updates must be delayed until the failure-trigger tx (in this case the local commitment) reaches ANTI_REORG_DELAY.
7013         // We can have at most two valid local commitment txs, so both cases must be covered, and both txs must be checked to get them all, as
7014         // an HTLC could have been removed from the latest local commitment tx but still be valid until we get the remote RAA.
7015
7016         let mut chanmon_cfgs = create_chanmon_cfgs(2);
7017         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
7018         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7019         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7020         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7021         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
7022
7023         let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
7024                 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
7025
7026         // We route 2 dust-HTLCs between A and B
7027         let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7028         let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7029         route_payment(&nodes[0], &[&nodes[1]], 1000000);
7030
7031         // Cache one local commitment tx as previous
7032         let as_prev_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7033
7034         // Fail one HTLC to prune it in the will-be-latest-local commitment tx
7035         nodes[1].node.fail_htlc_backwards(&payment_hash_2);
7036         check_added_monitors!(nodes[1], 0);
7037         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
7038         check_added_monitors!(nodes[1], 1);
7039
7040         let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
7041         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &remove.update_fail_htlcs[0]);
7042         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &remove.commitment_signed);
7043         check_added_monitors!(nodes[0], 1);
7044
7045         // Cache one local commitment tx as latest
7046         let as_last_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7047
7048         let events = nodes[0].node.get_and_clear_pending_msg_events();
7049         match events[0] {
7050                 MessageSendEvent::SendRevokeAndACK { node_id, .. } => {
7051                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
7052                 },
7053                 _ => panic!("Unexpected event"),
7054         }
7055         match events[1] {
7056                 MessageSendEvent::UpdateHTLCs { node_id, .. } => {
7057                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
7058                 },
7059                 _ => panic!("Unexpected event"),
7060         }
7061
7062         assert_ne!(as_prev_commitment_tx, as_last_commitment_tx);
7063         // Fail the 2 dust-HTLCs, moving their failure into the maturation buffer (htlc_updated_waiting_threshold_conf)
7064         if announce_latest {
7065                 mine_transaction(&nodes[0], &as_last_commitment_tx[0]);
7066         } else {
7067                 mine_transaction(&nodes[0], &as_prev_commitment_tx[0]);
7068         }
7069
7070         check_closed_broadcast!(nodes[0], true);
7071         check_added_monitors!(nodes[0], 1);
7072         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
7073
7074         assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7075         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7076         let events = nodes[0].node.get_and_clear_pending_events();
7077         // Only 2 PaymentPathFailed events should show up; the over-dust HTLC has to be failed by its HTLC-timeout tx
7078         assert_eq!(events.len(), 4);
7079         let mut first_failed = false;
7080         for event in events {
7081                 match event {
7082                         Event::PaymentPathFailed { payment_hash, .. } => {
7083                                 if payment_hash == payment_hash_1 {
7084                                         assert!(!first_failed);
7085                                         first_failed = true;
7086                                 } else {
7087                                         assert_eq!(payment_hash, payment_hash_2);
7088                                 }
7089                         },
7090                         Event::PaymentFailed { .. } => {}
7091                         _ => panic!("Unexpected event"),
7092                 }
7093         }
7094 }
7095
7096 #[test]
7097 fn test_failure_delay_dust_htlc_local_commitment() {
7098         do_test_failure_delay_dust_htlc_local_commitment(true);
7099         do_test_failure_delay_dust_htlc_local_commitment(false);
7100 }
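
// A minimal, self-contained sketch (constant and helper names are hypothetical) of the dust
// classification driving the test above: an HTLC below the holder's dust limit produces no output
// on the commitment tx, so its failure can only be surfaced once the commitment tx itself reaches
// ANTI_REORG_DELAY confirmations, rather than via an HTLC-timeout claim.
mod dust_htlc_sketch {
	/// Hypothetical per-channel dust limit, in satoshis.
	const HOLDER_DUST_LIMIT_SAT: u64 = 546;

	/// Returns true if an HTLC of `amount_msat` would be trimmed to fees (i.e. is "dust") and
	/// therefore has no corresponding output on the commitment transaction. The real check also
	/// accounts for the fee of the HTLC claim transaction, elided here.
	fn is_dust_htlc(amount_msat: u64) -> bool {
		amount_msat / 1000 < HOLDER_DUST_LIMIT_SAT
	}

	#[test]
	fn dust_classification() {
		assert!(is_dust_htlc(545_999)); // below the limit: trimmed, no output
		assert!(!is_dust_htlc(546_000)); // at the limit: gets a commitment tx output
	}
}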
7101
7102 fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
7103         // Outbound HTLC-failure updates must be cancelled if we get a reorg before we reach ANTI_REORG_DELAY.
7104         // Broadcast of revoked remote commitment tx, trigger failure-update of dust/non-dust HTLCs
7105         // Broadcast of remote commitment tx, trigger failure-update of dust-HTLCs
7106         // Broadcast of timeout tx on remote commitment tx, trigger failure-update of non-dust HTLCs
7107         // Broadcast of local commitment tx, trigger failure-update of dust-HTLCs
7108         // Broadcast of HTLC-timeout tx on local commitment tx, trigger failure-update of non-dust HTLCs
7109
7110         let chanmon_cfgs = create_chanmon_cfgs(3);
7111         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
7112         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
7113         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
7114         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
7115
7116         let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
7117                 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
7118
7119         let (_payment_preimage_1, dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7120         let (_payment_preimage_2, non_dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
7121
7122         let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7123         let bs_commitment_tx = get_local_commitment_txn!(nodes[1], chan.2);
7124
7125         // If `revoked`, make bs_commitment_tx a revoked commitment by completing one more payment
7126         if revoked {
7127                 let (payment_preimage_3, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
7128                 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
7129         }
7130
7131         let mut timeout_tx = Vec::new();
7132         if local {
7133                 // We fail dust-HTLC 1 by broadcast of local commitment tx
7134                 mine_transaction(&nodes[0], &as_commitment_tx[0]);
7135                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
7136                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7137                 expect_payment_failed!(nodes[0], dust_hash, false);
7138
7139                 connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - ANTI_REORG_DELAY);
7140                 check_closed_broadcast!(nodes[0], true);
7141                 check_added_monitors!(nodes[0], 1);
7142                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7143                 timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
7144                 assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
7145                 // We fail non-dust-HTLC 2 by broadcast of local HTLC-timeout tx on local commitment tx
7146                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7147                 mine_transaction(&nodes[0], &timeout_tx[0]);
7148                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7149                 expect_payment_failed!(nodes[0], non_dust_hash, false);
7150         } else {
7151                 // We fail dust-HTLC 1 by broadcast of remote commitment tx. If revoked, the non-dust HTLC is failed as well
7152                 mine_transaction(&nodes[0], &bs_commitment_tx[0]);
7153                 check_closed_broadcast!(nodes[0], true);
7154                 check_added_monitors!(nodes[0], 1);
7155                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
7156                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7157
7158                 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
7159                 timeout_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().drain(..)
7160                         .filter(|tx| tx.input[0].previous_output.txid == bs_commitment_tx[0].txid()).collect();
7161                 check_spends!(timeout_tx[0], bs_commitment_tx[0]);
7162                 // For both a revoked or non-revoked commitment transaction, after ANTI_REORG_DELAY the
7163                 // dust HTLC should have been failed.
7164                 expect_payment_failed!(nodes[0], dust_hash, false);
7165
7166                 if !revoked {
7167                         assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
7168                 } else {
7169                         assert_eq!(timeout_tx[0].lock_time.to_consensus_u32(), 11);
7170                 }
7171                 // We fail non-dust-HTLC 2 by broadcast of local timeout/revocation-claim tx
7172                 mine_transaction(&nodes[0], &timeout_tx[0]);
7173                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7174                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7175                 expect_payment_failed!(nodes[0], non_dust_hash, false);
7176         }
7177 }
7178
7179 #[test]
7180 fn test_sweep_outbound_htlc_failure_update() {
7181         do_test_sweep_outbound_htlc_failure_update(false, true);
7182         do_test_sweep_outbound_htlc_failure_update(false, false);
7183         do_test_sweep_outbound_htlc_failure_update(true, false);
7184 }
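
// A minimal sketch (constant value and helper name are hypothetical) of the confirmation gating
// exercised above: a failure triggered by an on-chain transaction is only surfaced once the
// trigger tx has ANTI_REORG_DELAY confirmations, and is cancelled if a reorg removes it first.
mod anti_reorg_gate_sketch {
	const ANTI_REORG_DELAY_SKETCH: u32 = 6;

	/// Returns true once a tx confirmed at `conf_height` is buried deeply enough at
	/// `current_height` to act on (counting the confirming block itself).
	fn matured(conf_height: u32, current_height: u32) -> bool {
		current_height + 1 >= conf_height + ANTI_REORG_DELAY_SKETCH
	}

	#[test]
	fn gate_matures_after_delay() {
		assert!(!matured(100, 104)); // 5 confirmations: keep waiting
		assert!(matured(100, 105)); // 6 confirmations: fail the HTLC back
	}
}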
7185
7186 #[test]
7187 fn test_user_configurable_csv_delay() {
7188         // We test that our channel constructors yield errors when we pass them an absurd CSV delay
7189
7190         let mut low_our_to_self_config = UserConfig::default();
7191         low_our_to_self_config.channel_handshake_config.our_to_self_delay = 6;
7192         let mut high_their_to_self_config = UserConfig::default();
7193         high_their_to_self_config.channel_handshake_limits.their_to_self_delay = 100;
7194         let user_cfgs = [Some(high_their_to_self_config.clone()), None];
7195         let chanmon_cfgs = create_chanmon_cfgs(2);
7196         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7197         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
7198         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7199
7200         // We test that config.channel_handshake_config.our_to_self_delay >= BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new()
7201         if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7202                 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0,
7203                 &low_our_to_self_config, 0, 42, None)
7204         {
7205                 match error {
7206                         APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
7207                         _ => panic!("Unexpected event"),
7208                 }
7209         } else { assert!(false) }
7210
7211         // We test that config.channel_handshake_config.our_to_self_delay >= BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
7212         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
7213         let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
7214         open_channel.common_fields.to_self_delay = 200;
7215         if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7216                 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
7217                 &low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
7218         {
7219                 match error {
7220                         ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str()));  },
7221                         _ => panic!("Unexpected event"),
7222                 }
7223         } else { assert!(false); }
7224
7225         // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::accept_channel()
7226         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
7227         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
7228         let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
7229         accept_channel.common_fields.to_self_delay = 200;
7230         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
7231         let reason_msg;
7232         if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
7233                 match action {
7234                         &ErrorAction::SendErrorMessage { ref msg } => {
7235                                 assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str()));
7236                                 reason_msg = msg.data.clone();
7237                         },
7238                         _ => { panic!(); }
7239                 }
7240         } else { panic!(); }
7241         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }, [nodes[1].node.get_our_node_id()], 1000000);
7242
7243         // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
7244         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
7245         let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
7246         open_channel.common_fields.to_self_delay = 200;
7247         if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7248                 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
7249                 &high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
7250         {
7251                 match error {
7252                         ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); },
7253                         _ => panic!("Unexpected event"),
7254                 }
7255         } else { assert!(false); }
7256 }
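
// A minimal sketch (constant value and helper names are hypothetical, error strings abbreviated)
// of the two-sided CSV-delay validation the test above exercises: we refuse to configure an
// our_to_self_delay below BREAKDOWN_TIMEOUT, and we refuse a counterparty-requested to_self_delay
// above our configured upper limit.
mod csv_delay_sketch {
	const BREAKDOWN_TIMEOUT_SKETCH: u16 = 144; // matches the spirit, not necessarily the value

	fn check_our_to_self_delay(our_to_self_delay: u16) -> Result<(), &'static str> {
		if our_to_self_delay < BREAKDOWN_TIMEOUT_SKETCH {
			return Err("Configured with an unreasonable our_to_self_delay putting user funds at risks");
		}
		Ok(())
	}

	fn check_their_to_self_delay(msg_to_self_delay: u16, our_limit: u16) -> Result<(), &'static str> {
		if msg_to_self_delay > our_limit {
			return Err("They wanted our payments to be delayed by a needlessly long period");
		}
		Ok(())
	}

	#[test]
	fn csv_delay_bounds() {
		assert!(check_our_to_self_delay(6).is_err()); // too short: our funds at risk
		assert!(check_our_to_self_delay(144).is_ok());
		assert!(check_their_to_self_delay(200, 100).is_err()); // too long: payments delayed
		assert!(check_their_to_self_delay(100, 100).is_ok());
	}
}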
7257
7258 #[test]
7259 fn test_check_htlc_underpaying() {
7260         // Send a payment through A -> B, where A maliciously
7261         // sends a probe payment (i.e., for less than the expected value)
7262         // to B. B should refuse the payment.
7263
7264         let chanmon_cfgs = create_chanmon_cfgs(2);
7265         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7266         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7267         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7268
7269         // Create some initial channels
7270         create_announced_chan_between_nodes(&nodes, 0, 1);
7271
7272         let scorer = test_utils::TestScorer::new();
7273         let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7274         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
7275                 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
7276         let route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000);
7277         let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(),
7278                 None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
7279         let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]);
7280         let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None).unwrap();
7281         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
7282                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
7283         check_added_monitors!(nodes[0], 1);
7284
7285         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
7286         assert_eq!(events.len(), 1);
7287         let mut payment_event = SendEvent::from_event(events.pop().unwrap());
7288         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
7289         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
7290
7291         // Note that we first have to wait a random delay before processing the receipt of the HTLC,
7292         // and then will wait a second random delay before failing the HTLC back:
7293         expect_pending_htlcs_forwardable!(nodes[1]);
7294         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
7295
7296         // nodes[1] is expecting a payment of 100_000 but received 10_000,
7297         // so it should fail the HTLC as if we didn't know the preimage.
7298         nodes[1].node.process_pending_htlc_forwards();
7299
7300         let events = nodes[1].node.get_and_clear_pending_msg_events();
7301         assert_eq!(events.len(), 1);
7302         let (update_fail_htlc, commitment_signed) = match events[0] {
7303                 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
7304                         assert!(update_add_htlcs.is_empty());
7305                         assert!(update_fulfill_htlcs.is_empty());
7306                         assert_eq!(update_fail_htlcs.len(), 1);
7307                         assert!(update_fail_malformed_htlcs.is_empty());
7308                         assert!(update_fee.is_none());
7309                         (update_fail_htlcs[0].clone(), commitment_signed)
7310                 },
7311                 _ => panic!("Unexpected event"),
7312         };
7313         check_added_monitors!(nodes[1], 1);
7314
7315         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlc);
7316         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
7317
7318         // 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32
7319         let mut expected_failure_data = (10_000 as u64).to_be_bytes().to_vec();
7320         expected_failure_data.extend_from_slice(&CHAN_CONFIRM_DEPTH.to_be_bytes());
7321         expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000|15, &expected_failure_data[..]);
7322 }
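
// A minimal sketch of the onion failure data reconstructed in the final assertion above: for the
// incorrect_or_unknown_payment_details code (0x4000|15), the data is the incoming HTLC amount as
// a big-endian u64 followed by the current block height as a big-endian u32.
mod underpay_failure_data_sketch {
	use alloc::vec::Vec;

	fn failure_data(htlc_msat: u64, height: u32) -> Vec<u8> {
		let mut data = htlc_msat.to_be_bytes().to_vec();
		data.extend_from_slice(&height.to_be_bytes());
		data
	}

	#[test]
	fn encodes_amount_then_height() {
		let data = failure_data(10_000, 100);
		assert_eq!(data.len(), 12); // 8 amount bytes + 4 height bytes
		let mut amt = [0u8; 8];
		amt.copy_from_slice(&data[..8]);
		assert_eq!(u64::from_be_bytes(amt), 10_000);
		let mut height = [0u8; 4];
		height.copy_from_slice(&data[8..]);
		assert_eq!(u32::from_be_bytes(height), 100);
	}
}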
7323
7324 #[test]
7325 fn test_announce_disable_channels() {
7326         // Create 3 channels between A and B. Disconnect B. Call timer_tick_occurred and check that
7327         // disabling ChannelUpdates are generated. Reconnect B, reestablish, and check that re-enabling ChannelUpdates are generated.
7328
7329         let chanmon_cfgs = create_chanmon_cfgs(2);
7330         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7331         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7332         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7333
7334         // Connect a dummy node so that subsequent broadcast events are properly generated
7335         connect_dummy_node(&nodes[0]);
7336
7337         create_announced_chan_between_nodes(&nodes, 0, 1);
7338         create_announced_chan_between_nodes(&nodes, 1, 0);
7339         create_announced_chan_between_nodes(&nodes, 0, 1);
7340
7341         // Disconnect peers
7342         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
7343         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
7344
7345         for _ in 0..DISABLE_GOSSIP_TICKS + 1 {
7346                 nodes[0].node.timer_tick_occurred();
7347         }
7348         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
7349         assert_eq!(msg_events.len(), 3);
7350         let mut chans_disabled = new_hash_map();
7351         for e in msg_events {
7352                 match e {
7353                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
7354                                 assert_eq!(msg.contents.flags & (1<<1), 1<<1); // The "channel disabled" bit should be set
7355                                 // Check that each channel gets updated exactly once
7356                                 if chans_disabled.insert(msg.contents.short_channel_id, msg.contents.timestamp).is_some() {
7357                                         panic!("Generated ChannelUpdate for wrong chan!");
7358                                 }
7359                         },
7360                         _ => panic!("Unexpected event"),
7361                 }
7362         }
7363         // Reconnect peers
7364         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
7365                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
7366         }, true).unwrap();
7367         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
7368         assert_eq!(reestablish_1.len(), 3);
7369         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
7370                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
7371         }, false).unwrap();
7372         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
7373         assert_eq!(reestablish_2.len(), 3);
7374
7375         // Reestablish chan_1
7376         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
7377         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7378         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
7379         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7380         // Reestablish chan_2
7381         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[1]);
7382         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7383         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[1]);
7384         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7385         // Reestablish chan_3
7386         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[2]);
7387         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7388         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[2]);
7389         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7390
7391         for _ in 0..ENABLE_GOSSIP_TICKS {
7392                 nodes[0].node.timer_tick_occurred();
7393         }
7394         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7395         nodes[0].node.timer_tick_occurred();
7396         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
7397         assert_eq!(msg_events.len(), 3);
7398         for e in msg_events {
7399                 match e {
7400                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
7401                                 assert_eq!(msg.contents.flags & (1<<1), 0); // The "channel disabled" bit should be off
7402                                 match chans_disabled.remove(&msg.contents.short_channel_id) {
7403                                         // Each update should have a higher timestamp than the previous one, replacing
7404                                         // the old one.
7405                                         Some(prev_timestamp) => assert!(msg.contents.timestamp > prev_timestamp),
7406                                         None => panic!("Generated ChannelUpdate for wrong chan!"),
7407                                 }
7408                         },
7409                         _ => panic!("Unexpected event"),
7410                 }
7411         }
7412         // Check that each channel gets updated exactly once
7413         assert!(chans_disabled.is_empty());
7414 }
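
// A minimal sketch of the flag check used twice above: in a BOLT 7 channel_update, bit 1 of the
// flags field marks the channel as disabled (bit 0 encodes the direction).
mod channel_disabled_flag_sketch {
	fn is_disabled(channel_flags: u8) -> bool {
		channel_flags & (1 << 1) != 0
	}

	#[test]
	fn disabled_bit() {
		assert!(is_disabled(0b10)); // disabled, direction 0
		assert!(is_disabled(0b11)); // disabled, direction 1
		assert!(!is_disabled(0b01)); // enabled, direction 1
	}
}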
7415
7416 #[test]
7417 fn test_bump_penalty_txn_on_revoked_commitment() {
7418         // If penalty txn have feerates too low to get into mempools, RBF-bump them to be sure
7419         // we're able to claim outputs on the revoked commitment transaction before its timelocks expire
7420
7421         let chanmon_cfgs = create_chanmon_cfgs(2);
7422         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7423         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7424         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7425
7426         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7427
7428         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
7429         let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 30)
7430                 .with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap();
7431         let (route,_, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_params, 3000000);
7432         send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000);
7433
7434         let revoked_txn = get_local_commitment_txn!(nodes[0], chan.2);
7435         // Revoked commitment tx with 4 outputs: to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
7436         assert_eq!(revoked_txn[0].output.len(), 4);
7437         assert_eq!(revoked_txn[0].input.len(), 1);
7438         assert_eq!(revoked_txn[0].input[0].previous_output.txid, chan.3.txid());
7439         let revoked_txid = revoked_txn[0].txid();
7440
7441         let mut penalty_sum = 0;
7442         for outp in revoked_txn[0].output.iter() {
7443                 if outp.script_pubkey.is_v0_p2wsh() {
7444                         penalty_sum += outp.value;
7445                 }
7446         }
7447
7448         // Connect blocks to change the height_timer range, to check that we use the right soonest_timelock
7449         let header_114 = connect_blocks(&nodes[1], 14);
7450
7451         // Actually revoke the commitment tx by claiming an HTLC
7452         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
7453         connect_block(&nodes[1], &create_dummy_block(header_114, 42, vec![revoked_txn[0].clone()]));
7454         check_added_monitors!(nodes[1], 1);
7455
7456         // One or more justice txn should have been broadcast; check them
7457         let penalty_1;
7458         let feerate_1;
7459         {
7460                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7461                 assert_eq!(node_txn.len(), 1); // justice tx (broadcasted from ChannelMonitor)
7462                 assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7463                 assert_eq!(node_txn[0].output.len(), 1);
7464                 check_spends!(node_txn[0], revoked_txn[0]);
7465                 let fee_1 = penalty_sum - node_txn[0].output[0].value;
7466                 feerate_1 = fee_1 * 1000 / node_txn[0].weight().to_wu();
7467                 penalty_1 = node_txn[0].txid();
7468                 node_txn.clear();
7469         };
7470
7471         // After exhaustion of height timer, a new bumped justice tx should have been broadcast, check it
7472         connect_blocks(&nodes[1], 15);
7473         let mut penalty_2 = penalty_1;
7474         let mut feerate_2 = 0;
7475         {
7476                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7477                 assert_eq!(node_txn.len(), 1);
7478                 if node_txn[0].input[0].previous_output.txid == revoked_txid {
7479                         assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7480                         assert_eq!(node_txn[0].output.len(), 1);
7481                         check_spends!(node_txn[0], revoked_txn[0]);
7482                         penalty_2 = node_txn[0].txid();
7483                         // Verify the new bumped tx is different from the last claiming transaction; we don't want a spurious rebroadcast
7484                         assert_ne!(penalty_2, penalty_1);
7485                         let fee_2 = penalty_sum - node_txn[0].output[0].value;
7486                         feerate_2 = fee_2 * 1000 / node_txn[0].weight().to_wu();
7487                         // Verify 25% bump heuristic
7488                         assert!(feerate_2 * 100 >= feerate_1 * 125);
7489                         node_txn.clear();
7490                 }
7491         }
7492         assert_ne!(feerate_2, 0);
7493
7494         // After exhaustion of height timer for a 2nd time, a new bumped justice tx should have been broadcast, check it
7495         connect_blocks(&nodes[1], 1);
7496         let penalty_3;
7497         let mut feerate_3 = 0;
7498         {
7499                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7500                 assert_eq!(node_txn.len(), 1);
7501                 if node_txn[0].input[0].previous_output.txid == revoked_txid {
7502                         assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7503                         assert_eq!(node_txn[0].output.len(), 1);
7504                         check_spends!(node_txn[0], revoked_txn[0]);
7505                         penalty_3 = node_txn[0].txid();
7506                         // Verify the new bumped tx is different from the last claiming transaction; we don't want a spurious rebroadcast
7507                         assert_ne!(penalty_3, penalty_2);
7508                         let fee_3 = penalty_sum - node_txn[0].output[0].value;
7509                         feerate_3 = fee_3 * 1000 / node_txn[0].weight().to_wu();
7510                         // Verify 25% bump heuristic
7511                         assert!(feerate_3 * 100 >= feerate_2 * 125);
7512                         node_txn.clear();
7513                 }
7514         }
7515         assert_ne!(feerate_3, 0);
7516
7517         nodes[1].node.get_and_clear_pending_events();
7518         nodes[1].node.get_and_clear_pending_msg_events();
7519 }
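
// A minimal sketch of the arithmetic the bump assertions above perform: the feerate is computed
// as fee (sats) * 1000 / weight (weight units), i.e. sats per kilo-weight-unit, and each RBF
// replacement must raise the previous feerate by at least 25%.
mod bump_heuristic_sketch {
	fn feerate_per_kwu(fee_sat: u64, weight_wu: u64) -> u64 {
		fee_sat * 1000 / weight_wu
	}

	fn satisfies_25pct_bump(prev_feerate: u64, new_feerate: u64) -> bool {
		new_feerate * 100 >= prev_feerate * 125
	}

	#[test]
	fn bump_math() {
		let prev = feerate_per_kwu(1_000, 720); // 1388 sat/kWU
		let bumped = feerate_per_kwu(1_300, 720); // 1805 sat/kWU
		assert!(satisfies_25pct_bump(prev, bumped));
		assert!(!satisfies_25pct_bump(prev, prev + 1)); // a token bump is not enough
	}
}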
7520
7521 #[test]
7522 fn test_bump_penalty_txn_on_revoked_htlcs() {
7523         // If penalty txn have feerates too low to get into mempools, RBF-bump them to be sure
7524         // we're able to claim outputs on revoked HTLC transactions before their timelocks expire
7525
7526         let mut chanmon_cfgs = create_chanmon_cfgs(2);
7527         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
7528         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7529         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7530         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7531
7532         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7533         // Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps)
7534         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 50).with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
7535         let scorer = test_utils::TestScorer::new();
7536         let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7537         let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
7538         let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(), None,
7539                 nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
7540         let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0;
7541         let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 50)
7542                 .with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap();
7543         let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
7544         let route = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None,
7545                 nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
7546         let failed_payment_hash = send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000).1;
7547
7548         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
7549         assert_eq!(revoked_local_txn[0].input.len(), 1);
7550         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
7551
7552         // Revoke local commitment tx
7553         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
7554
7555         // B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx
7556         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()]));
7557         check_closed_broadcast!(nodes[1], true);
7558         check_added_monitors!(nodes[1], 1);
7559         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
7560         connect_blocks(&nodes[1], 50); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
7561
7562         let revoked_htlc_txn = {
7563                 let txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
7564                 assert_eq!(txn.len(), 2);
7565
7566                 assert_eq!(txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
7567                 assert_eq!(txn[0].input.len(), 1);
7568                 check_spends!(txn[0], revoked_local_txn[0]);
7569
7570                 assert_eq!(txn[1].input.len(), 1);
7571                 assert_eq!(txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
7572                 assert_eq!(txn[1].output.len(), 1);
7573                 check_spends!(txn[1], revoked_local_txn[0]);
7574
7575                 txn
7576         };
7577
7578         // Broadcast set of revoked txn on A
7579         let hash_128 = connect_blocks(&nodes[0], 40);
7580         let block_11 = create_dummy_block(hash_128, 42, vec![revoked_local_txn[0].clone()]);
7581         connect_block(&nodes[0], &block_11);
7582         let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]);
7583         connect_block(&nodes[0], &block_129);
7584         let events = nodes[0].node.get_and_clear_pending_events();
7585         expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]);
7586         match events.last().unwrap() {
7587                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
7588                 _ => panic!("Unexpected event"),
7589         }
7590         let first;
7591         let feerate_1;
7592         let penalty_txn;
7593         {
7594                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7595                 assert_eq!(node_txn.len(), 4); // 3 penalty txn on revoked commitment tx + 1 penalty tx on revoked HTLC txn
7596                 // Verify claim txn are spending revoked HTLC txn
7597
7598                 // node_txn 0-2 each spend a separate revoked output from revoked_local_txn[0]
7599                 // Note that node_txn[0] and node_txn[1] are bogus - they double spend the revoked_htlc_txn
7600                 // which are included in the same block (they are broadcasted because we scan the
7601                 // transactions linearly and generate claims as we go, they likely should be removed in the
7602                 // future).
7603                 assert_eq!(node_txn[0].input.len(), 1);
7604                 check_spends!(node_txn[0], revoked_local_txn[0]);
7605                 assert_eq!(node_txn[1].input.len(), 1);
7606                 check_spends!(node_txn[1], revoked_local_txn[0]);
7607                 assert_eq!(node_txn[2].input.len(), 1);
7608                 check_spends!(node_txn[2], revoked_local_txn[0]);
7609
7610                 // Each of the three justice transactions claim a separate (single) output of the three
7611                 // available, which we check here:
7612                 assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
7613                 assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
7614                 assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
7615
7616                 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output);
7617                 assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
7618
7619                 // node_txn[3] spends the revoked outputs from the revoked_htlc_txn (which only have one
7620                 // output, checked above).
7621                 assert_eq!(node_txn[3].input.len(), 2);
7622                 assert_eq!(node_txn[3].output.len(), 1);
7623                 check_spends!(node_txn[3], revoked_htlc_txn[0], revoked_htlc_txn[1]);
7624
7625                 first = node_txn[3].txid();
7626                 // Store both feerates for later comparison
7627                 let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[3].output[0].value;
7628                 feerate_1 = fee_1 * 1000 / node_txn[3].weight().to_wu();
7629                 penalty_txn = vec![node_txn[2].clone()];
7630                 node_txn.clear();
7631         }
7632
7633         // Connect one more block to see if bumped penalty txn are issued for HTLC txn
7634         let block_130 = create_dummy_block(block_129.block_hash(), 42, penalty_txn);
7635         connect_block(&nodes[0], &block_130);
7636         let block_131 = create_dummy_block(block_130.block_hash(), 42, Vec::new());
7637         connect_block(&nodes[0], &block_131);
7638
7639         // A few more blocks to confirm the penalty txn
7640         connect_blocks(&nodes[0], 4);
7641         assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
7642         let header_144 = connect_blocks(&nodes[0], 9);
7643         let node_txn = {
7644                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7645                 assert_eq!(node_txn.len(), 1);
7646
7647                 assert_eq!(node_txn[0].input.len(), 2);
7648                 check_spends!(node_txn[0], revoked_htlc_txn[0], revoked_htlc_txn[1]);
7649                 // Verify the bumped tx differs and satisfies the 25% bump heuristic
7650                 assert_ne!(first, node_txn[0].txid());
7651                 let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[0].output[0].value;
7652                 let feerate_2 = fee_2 * 1000 / node_txn[0].weight().to_wu();
7653                 assert!(feerate_2 * 100 > feerate_1 * 125);
7654                 let txn = vec![node_txn[0].clone()];
7655                 node_txn.clear();
7656                 txn
7657         };
7658         // Broadcast claim txn and confirm blocks to avoid further bumps on these outputs
7659         connect_block(&nodes[0], &create_dummy_block(header_144, 42, node_txn));
7660         connect_blocks(&nodes[0], 20);
7661         {
7662                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7663                 // We verify that no new transaction has been broadcast. Previously we were buggy on
7664                 // this exact behavior: by not tracking remote HTLC outputs for monitoring (see #411),
7665                 // we wouldn't see them spent by a justice tx, and bumped justice txn were generated
7666                 // forever instead of being safely cleaned up after confirmation plus ANTI_REORG_DELAY
7667                 // blocks. Spending the revoked HTLC outputs via a claiming transaction removes the
7668                 // request as expected and dries up bumped justice generation.
7669                 assert_eq!(node_txn.len(), 0);
7670                 node_txn.clear();
7671         }
7672         check_closed_broadcast!(nodes[0], true);
7673         check_added_monitors!(nodes[0], 1);
7674 }
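
// A minimal sketch of the pairwise-disjointness property asserted above with chained assert_ne!:
// a set of justice txn is only well-formed if no two of them claim the same previous output.
mod disjoint_claims_sketch {
	use alloc::collections::BTreeSet;

	/// `prev_outputs` are (txid-stand-in, vout) pairs; returns true if all are distinct.
	fn all_disjoint(prev_outputs: &[(u64, u32)]) -> bool {
		let mut seen = BTreeSet::new();
		prev_outputs.iter().all(|outp| seen.insert(*outp))
	}

	#[test]
	fn disjointness() {
		assert!(all_disjoint(&[(1, 0), (1, 1), (2, 0)]));
		assert!(!all_disjoint(&[(1, 0), (1, 1), (1, 0)])); // double-claim of (1, 0)
	}
}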
7675
7676 #[test]
7677 fn test_bump_penalty_txn_on_remote_commitment() {
7678         // If claim txn have feerates too low to get into mempools, RBF-bump them to be sure
7679         // we're able to claim outputs on the remote commitment transaction before its timelocks expire
7680
7681         // Create 2 HTLCs
7682         // Provide preimage for one
7683         // Check aggregation
7684
7685         let chanmon_cfgs = create_chanmon_cfgs(2);
7686         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7687         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7688         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7689
7690         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7691         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
7692         route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
7693
7694         // Remote commitment tx with 4 outputs: to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
7695         let remote_txn = get_local_commitment_txn!(nodes[0], chan.2);
7696         assert_eq!(remote_txn[0].output.len(), 4);
7697         assert_eq!(remote_txn[0].input.len(), 1);
7698         assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());
7699
7700         // Claim a HTLC without revocation (provide B monitor with preimage)
7701         nodes[1].node.claim_funds(payment_preimage);
7702         expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
7703         mine_transaction(&nodes[1], &remote_txn[0]);
7704         check_added_monitors!(nodes[1], 2);
7705         connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
7706
7707         // One or more claim tx should have been broadcast, check it
7708         let timeout;
7709         let preimage;
7710         let preimage_bump;
7711         let feerate_timeout;
7712         let feerate_preimage;
7713         {
7714                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7715                 // 3 transactions including:
7716                 //   preimage and timeout sweeps from remote commitment + preimage sweep bump
7717                 assert_eq!(node_txn.len(), 3);
7718                 assert_eq!(node_txn[0].input.len(), 1);
7719                 assert_eq!(node_txn[1].input.len(), 1);
7720                 assert_eq!(node_txn[2].input.len(), 1);
7721                 check_spends!(node_txn[0], remote_txn[0]);
7722                 check_spends!(node_txn[1], remote_txn[0]);
7723                 check_spends!(node_txn[2], remote_txn[0]);
7724
7725                 preimage = node_txn[0].txid();
7726                 let index = node_txn[0].input[0].previous_output.vout;
7727                 let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
7728                 feerate_preimage = fee * 1000 / node_txn[0].weight().to_wu();
7729
7730                 let (preimage_bump_tx, timeout_tx) = if node_txn[2].input[0].previous_output == node_txn[0].input[0].previous_output {
7731                         (node_txn[2].clone(), node_txn[1].clone())
7732                 } else {
7733                         (node_txn[1].clone(), node_txn[2].clone())
7734                 };
7735
7736                 preimage_bump = preimage_bump_tx;
7737                 check_spends!(preimage_bump, remote_txn[0]);
7738                 assert_eq!(node_txn[0].input[0].previous_output, preimage_bump.input[0].previous_output);
7739
7740                 timeout = timeout_tx.txid();
7741                 let index = timeout_tx.input[0].previous_output.vout;
7742                 let fee = remote_txn[0].output[index as usize].value - timeout_tx.output[0].value;
7743                 feerate_timeout = fee * 1000 / timeout_tx.weight().to_wu();
7744
7745                 node_txn.clear();
7746         };
7747         assert_ne!(feerate_timeout, 0);
7748         assert_ne!(feerate_preimage, 0);
7749
7750         // After exhaustion of height timer, new bumped claim txn should have been broadcast, check it
7751         connect_blocks(&nodes[1], 1);
7752         {
7753                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7754                 assert_eq!(node_txn.len(), 1);
7755                 assert_eq!(node_txn[0].input.len(), 1);
7756                 assert_eq!(preimage_bump.input.len(), 1);
7757                 check_spends!(node_txn[0], remote_txn[0]);
7758                 check_spends!(preimage_bump, remote_txn[0]);
7759
7760                 let index = preimage_bump.input[0].previous_output.vout;
7761                 let fee = remote_txn[0].output[index as usize].value - preimage_bump.output[0].value;
7762                 let new_feerate = fee * 1000 / preimage_bump.weight().to_wu();
7763                 assert!(new_feerate * 100 > feerate_timeout * 125);
7764                 assert_ne!(timeout, preimage_bump.txid());
7765
7766                 let index = node_txn[0].input[0].previous_output.vout;
7767                 let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
7768                 let new_feerate = fee * 1000 / node_txn[0].weight().to_wu();
7769                 assert!(new_feerate * 100 > feerate_preimage * 125);
7770                 assert_ne!(preimage, node_txn[0].txid());
7771
7772                 node_txn.clear();
7773         }
7774
7775         nodes[1].node.get_and_clear_pending_events();
7776         nodes[1].node.get_and_clear_pending_msg_events();
7777 }
7778
7779 #[test]
7780 fn test_counterparty_raa_skip_no_crash() {
7781         // Previously, if our counterparty sent two RAAs in a row without us having provided a
7782         // commitment transaction, we would have happily carried on and provided them the next
7783         // commitment transaction based on one RAA forward. This would probably eventually have led to
7784         // channel closure, but it would not have resulted in funds loss. Still, our
7785         // TestChannelSigner would have panicked as it doesn't like jumps into the future. Here, we
7786         // check simply that the channel is closed in response to such an RAA, but don't check whether
7787         // we decide to punish our counterparty for revoking their funds (as we don't currently
7788         // implement that).
7789         let chanmon_cfgs = create_chanmon_cfgs(2);
7790         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7791         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7792         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7793         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
7794
7795         let per_commitment_secret;
7796         let next_per_commitment_point;
7797         {
7798                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
7799                 let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
7800                 let keys = guard.channel_by_id.get_mut(&channel_id).map(
7801                         |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
7802                 ).flatten().unwrap().get_signer();
7803
7804                 const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
7805
7806                 // Make signer believe we got a counterparty signature, so that it allows the revocation
7807                 keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
7808                 per_commitment_secret = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
7809
7810                 // Must revoke without gaps
7811                 keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
7812                 keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
7813
7814                 keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
7815                 next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
7816                         &SecretKey::from_slice(&keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
7817         }
7818
7819         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
7820                 &msgs::RevokeAndACK {
7821                         channel_id,
7822                         per_commitment_secret,
7823                         next_per_commitment_point,
7824                         #[cfg(taproot)]
7825                         next_local_nonce: None,
7826                 });
7827         assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
7828         check_added_monitors!(nodes[1], 1);
7829         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() }
7830                 , [nodes[0].node.get_our_node_id()], 100000);
7831 }
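
// A minimal sketch (the signer type is hypothetical) of the commitment-number bookkeeping behind
// the "must revoke without gaps" comment above: commitment numbers count down from 2^48 - 1, and
// a well-behaved signer only releases the secret for the next unrevoked number.
mod commitment_number_sketch {
	const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

	struct SketchSigner { next_to_revoke: u64 }

	impl SketchSigner {
		fn release_commitment_secret(&mut self, idx: u64) -> Result<(), &'static str> {
			if idx != self.next_to_revoke {
				return Err("revocation must proceed without gaps");
			}
			self.next_to_revoke -= 1;
			Ok(())
		}
	}

	#[test]
	fn no_gaps() {
		let mut signer = SketchSigner { next_to_revoke: INITIAL_COMMITMENT_NUMBER };
		assert!(signer.release_commitment_secret(INITIAL_COMMITMENT_NUMBER).is_ok());
		// Skipping a commitment number (a "gap") must be rejected.
		assert!(signer.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2).is_err());
		assert!(signer.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1).is_ok());
	}
}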
7832
7833 #[test]
7834 fn test_bump_txn_sanitize_tracking_maps() {
7835         // Sanitizing pending_claim_requests and claimable_outpoints used to be buggy;
7836         // verify we clean them right after the expiration of ANTI_REORG_DELAY.
7837
7838         let chanmon_cfgs = create_chanmon_cfgs(2);
7839         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7840         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7841         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7842
7843         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7844         // Lock HTLC in both directions
7845         let (payment_preimage_1, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
7846         let (_, payment_hash_2, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
7847
7848         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
7849         assert_eq!(revoked_local_txn[0].input.len(), 1);
7850         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
7851
7852         // Revoke local commitment tx
7853         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
7854
7855         // Broadcast set of revoked txn on A
7856         connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH);
7857         expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
7858         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
7859
7860         mine_transaction(&nodes[0], &revoked_local_txn[0]);
7861         check_closed_broadcast!(nodes[0], true);
7862         check_added_monitors!(nodes[0], 1);
7863         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000);
7864         let penalty_txn = {
7865                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7866                 assert_eq!(node_txn.len(), 3); //ChannelMonitor: justice txn * 3
7867                 check_spends!(node_txn[0], revoked_local_txn[0]);
7868                 check_spends!(node_txn[1], revoked_local_txn[0]);
7869                 check_spends!(node_txn[2], revoked_local_txn[0]);
7870                 let penalty_txn = vec![node_txn[0].clone(), node_txn[1].clone(), node_txn[2].clone()];
7871                 node_txn.clear();
7872                 penalty_txn
7873         };
7874         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, penalty_txn));
7875         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7876         {
7877                 let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(OutPoint { txid: chan.3.txid(), index: 0 }).unwrap();
7878                 assert!(monitor.inner.lock().unwrap().onchain_tx_handler.pending_claim_requests.is_empty());
7879                 assert!(monitor.inner.lock().unwrap().onchain_tx_handler.claimable_outpoints.is_empty());
7880         }
7881 }
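
// A minimal sketch (constant value and map layout are hypothetical) of the map sanitization the
// test above verifies: once a claim has ANTI_REORG_DELAY confirmations, its entries must be
// dropped from the tracking maps so that fee bumping stops.
mod claim_map_sanitize_sketch {
	use alloc::collections::BTreeMap;

	const ANTI_REORG_DELAY_SKETCH: u32 = 6;

	/// Map values are the height at which each claim confirmed; keep a claim only while it has
	/// fewer than ANTI_REORG_DELAY confirmations.
	fn prune_claims(claims: &mut BTreeMap<u64, u32>, current_height: u32) {
		claims.retain(|_, conf_height| current_height < *conf_height + ANTI_REORG_DELAY_SKETCH - 1);
	}

	#[test]
	fn prunes_after_delay() {
		let mut claims = BTreeMap::new();
		claims.insert(1u64, 100u32); // claim id 1 confirmed at height 100
		prune_claims(&mut claims, 104); // only 5 confirmations: keep tracking
		assert_eq!(claims.len(), 1);
		prune_claims(&mut claims, 105); // 6 confirmations: safely prune
		assert!(claims.is_empty());
	}
}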
7882
7883 #[test]
7884 fn test_channel_conf_timeout() {
7885         // Tests that, for inbound channels, we give up on them if the funding transaction does not
7886         // confirm within 2016 blocks, as recommended by BOLT 2.
7887         let chanmon_cfgs = create_chanmon_cfgs(2);
7888         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7889         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7890         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7891
7892         let _funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 100_000);
7893
7894         // The outbound node should wait forever for confirmation:
7895         // This matches `channel::FUNDING_CONF_DEADLINE_BLOCKS` and BOLT 2's suggested timeout, thus is
7896         // copied here instead of directly referencing the constant.
7897         connect_blocks(&nodes[0], 2016);
7898         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7899
7900         // The inbound node should fail the channel after exactly 2016 blocks
7901         connect_blocks(&nodes[1], 2015);
7902         check_added_monitors!(nodes[1], 0);
7903         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7904
7905         connect_blocks(&nodes[1], 1);
7906         check_added_monitors!(nodes[1], 1);
7907         check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
7908         let close_ev = nodes[1].node.get_and_clear_pending_msg_events();
7909         assert_eq!(close_ev.len(), 1);
7910         match close_ev[0] {
7911                 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { ref msg }, ref node_id } => {
7912                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
7913                         assert_eq!(msg.as_ref().unwrap().data, "Channel closed because funding transaction failed to confirm within 2016 blocks");
7914                 },
7915                 _ => panic!("Unexpected event"),
7916         }
7917 }
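
// A minimal sketch (the helper name is hypothetical) of the deadline logic tested above: an
// inbound channel whose funding tx has not confirmed within 2016 blocks should be force-closed.
// The constant mirrors BOLT 2's recommendation.
mod funding_deadline_sketch {
	const FUNDING_CONF_DEADLINE_BLOCKS_SKETCH: u32 = 2016;

	fn should_timeout(funding_height: u32, current_height: u32, confirmed: bool) -> bool {
		!confirmed && current_height >= funding_height + FUNDING_CONF_DEADLINE_BLOCKS_SKETCH
	}

	#[test]
	fn deadline() {
		assert!(!should_timeout(100, 100 + 2015, false)); // one block shy of the deadline
		assert!(should_timeout(100, 100 + 2016, false)); // deadline reached: give up
		assert!(!should_timeout(100, 100 + 5000, true)); // confirmed channels never time out
	}
}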
7918
7919 #[test]
7920 fn test_override_channel_config() {
7921         let chanmon_cfgs = create_chanmon_cfgs(2);
7922         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7923         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7924         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7925
7926         // Node0 initiates a channel to node1 using the override config.
7927         let mut override_config = UserConfig::default();
7928         override_config.channel_handshake_config.our_to_self_delay = 200;
7929
7930         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(override_config)).unwrap();
7931
7932         // Assert the channel created by node0 is using the override config.
7933         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7934         assert_eq!(res.common_fields.channel_flags, 0);
7935         assert_eq!(res.common_fields.to_self_delay, 200);
7936 }
7937
7938 #[test]
7939 fn test_override_0msat_htlc_minimum() {
7940         let mut zero_config = UserConfig::default();
7941         zero_config.channel_handshake_config.our_htlc_minimum_msat = 0;
7942         let chanmon_cfgs = create_chanmon_cfgs(2);
7943         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7944         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(zero_config.clone())]);
7945         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7946
7947         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(zero_config)).unwrap();
7948         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7949         assert_eq!(res.common_fields.htlc_minimum_msat, 1);
7950
7951         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
7952         let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
7953         assert_eq!(res.common_fields.htlc_minimum_msat, 1);
7954 }
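
// A minimal sketch (the helper name is hypothetical) of the clamping observed above: even when a
// user configures our_htlc_minimum_msat = 0, the open_channel/accept_channel messages advertise a
// minimum of 1 msat, since a 0-value HTLC is not a valid payment.
mod htlc_minimum_sketch {
	fn effective_htlc_minimum_msat(configured: u64) -> u64 {
		configured.max(1)
	}

	#[test]
	fn clamps_zero_to_one() {
		assert_eq!(effective_htlc_minimum_msat(0), 1);
		assert_eq!(effective_htlc_minimum_msat(5), 5);
	}
}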
7955
7956 #[test]
7957 fn test_channel_update_has_correct_htlc_maximum_msat() {
7958         // Tests that the `ChannelUpdate` message sets `htlc_maximum_msat` correctly.
7959         // BOLT 7 specifies that, if present, `htlc_maximum_msat`:
7960         // 1. MUST be set to less than or equal to the channel capacity. In LDK, this is capped to
7961         //    90% of the `channel_value`.
7962         // 2. MUST be set to less than or equal to the `max_htlc_value_in_flight_msat` received from the peer. (A sketch of the combined cap follows this test.)
7963
7964         let mut config_30_percent = UserConfig::default();
7965         config_30_percent.channel_handshake_config.announced_channel = true;
7966         config_30_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 30;
7967         let mut config_50_percent = UserConfig::default();
7968         config_50_percent.channel_handshake_config.announced_channel = true;
7969         config_50_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50;
7970         let mut config_95_percent = UserConfig::default();
7971         config_95_percent.channel_handshake_config.announced_channel = true;
7972         config_95_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 95;
7973         let mut config_100_percent = UserConfig::default();
7974         config_100_percent.channel_handshake_config.announced_channel = true;
7975         config_100_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
7976
7977         let chanmon_cfgs = create_chanmon_cfgs(4);
7978         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
7979         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[Some(config_30_percent), Some(config_50_percent), Some(config_95_percent), Some(config_100_percent)]);
7980         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
7981
7982         let channel_value_satoshis = 100000;
7983         let channel_value_msat = channel_value_satoshis * 1000;
7984         let channel_value_30_percent_msat = (channel_value_msat as f64 * 0.3) as u64;
7985         let channel_value_50_percent_msat = (channel_value_msat as f64 * 0.5) as u64;
7986         let channel_value_90_percent_msat = (channel_value_msat as f64 * 0.9) as u64;
7987
7988         let (node_0_chan_update, node_1_chan_update, _, _)  = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value_satoshis, 10001);
7989         let (node_2_chan_update, node_3_chan_update, _, _)  = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, channel_value_satoshis, 10001);
7990
7991         // Assert that `node[0]`'s `ChannelUpdate` is capped at 50 percent of the `channel_value`, as
7992         // that's the value of `node[1]`'s `holder_max_htlc_value_in_flight_msat`.
7993         assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, channel_value_50_percent_msat);
7994         // Assert that `node[1]`'s `ChannelUpdate` is capped at 30 percent of the `channel_value`, as
7995         // that's the value of `node[0]`'s `holder_max_htlc_value_in_flight_msat`.
7996         assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, channel_value_30_percent_msat);
7997
7998         // Assert that `node[2]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
7999         // the value of `node[3]`'s `holder_max_htlc_value_in_flight_msat` (100%) exceeds 90% of the
8000         // `channel_value`.
8001         assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
8002         // Assert that `node[3]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
8003         // the value of `node[2]`'s `holder_max_htlc_value_in_flight_msat` (95%) exceeds 90% of the
8004         // `channel_value`.
8005         assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
8006 }
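
// A minimal sketch (hypothetical helper, not LDK API) combining the two BOLT 7 rules asserted
// above: the advertised `htlc_maximum_msat` is the lesser of 90% of the channel value and the
// peer's `max_htlc_value_in_flight_msat`.
fn expected_htlc_maximum_msat_sketch(channel_value_msat: u64, peer_max_in_flight_msat: u64) -> u64 {
	core::cmp::min(channel_value_msat / 100 * 90, peer_max_in_flight_msat)
}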
8007
8008 #[test]
8009 fn test_manually_accept_inbound_channel_request() {
8010         let mut manually_accept_conf = UserConfig::default();
8011         manually_accept_conf.manually_accept_inbound_channels = true;
8012         let chanmon_cfgs = create_chanmon_cfgs(2);
8013         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8014         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
8015         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8016
8017         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
8018         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8019
8020         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
8021
8022         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
8023         // accepting the inbound channel request.
8024         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
8025
8026         let events = nodes[1].node.get_and_clear_pending_events();
8027         match events[0] {
8028                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
8029                         nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23).unwrap();
8030                 }
8031                 _ => panic!("Unexpected event"),
8032         }
8033
8034         let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8035         assert_eq!(accept_msg_ev.len(), 1);
8036
8037         match accept_msg_ev[0] {
8038                 MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
8039                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8040                 }
8041                 _ => panic!("Unexpected event"),
8042         }
8043
8044         nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
8045
8046         let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8047         assert_eq!(close_msg_ev.len(), 1);
8048
8049         let events = nodes[1].node.get_and_clear_pending_events();
8050         match events[0] {
8051                 Event::ChannelClosed { user_channel_id, .. } => {
8052                         assert_eq!(user_channel_id, 23);
8053                 }
8054                 _ => panic!("Unexpected event"),
8055         }
8056 }
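
// For reference, a hedged sketch (shape only; the surrounding event loop and the
// `channel_manager` handle are assumed, not shown) of how a node might wire manual
// acceptance outside of tests:
//
//	match event {
//		Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => {
//			// Vet the peer, then accept (attaching an application-chosen user_channel_id)...
//			channel_manager.accept_inbound_channel(&temporary_channel_id, &counterparty_node_id, 42).unwrap();
//			// ...or reject by force-closing the still-pending channel, as the next test does.
//		},
//		_ => {},
//	}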
8057
8058 #[test]
8059 fn test_manually_reject_inbound_channel_request() {
8060         let mut manually_accept_conf = UserConfig::default();
8061         manually_accept_conf.manually_accept_inbound_channels = true;
8062         let chanmon_cfgs = create_chanmon_cfgs(2);
8063         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8064         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
8065         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8066
8067         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
8068         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8069
8070         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
8071
8072         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
8073         // rejecting the inbound channel request.
8074         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
8075
8076         let events = nodes[1].node.get_and_clear_pending_events();
8077         match events[0] {
8078                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
8079                         nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
8080                 }
8081                 _ => panic!("Unexpected event"),
8082         }
8083
8084         let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8085         assert_eq!(close_msg_ev.len(), 1);
8086
8087         match close_msg_ev[0] {
8088                 MessageSendEvent::HandleError { ref node_id, .. } => {
8089                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8090                 }
8091                 _ => panic!("Unexpected event"),
8092         }
8093
8094         // There should be no more events to process, as the channel was never opened.
8095         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
8096 }
8097
8098 #[test]
8099 fn test_can_not_accept_inbound_channel_twice() {
8100         let mut manually_accept_conf = UserConfig::default();
8101         manually_accept_conf.manually_accept_inbound_channels = true;
8102         let chanmon_cfgs = create_chanmon_cfgs(2);
8103         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8104         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
8105         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8106
8107         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
8108         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8109
8110         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
8111
8112         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
8113         // accepting the inbound channel request.
8114         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
8115
8116         let events = nodes[1].node.get_and_clear_pending_events();
8117         match events[0] {
8118                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
8119                         nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
8120                         let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0);
8121                         match api_res {
8122                                 Err(APIError::APIMisuseError { err }) => {
8123                                         assert_eq!(err, "No such channel awaiting to be accepted.");
8124                                 },
8125                                 Ok(_) => panic!("Channel shouldn't be possible to be accepted twice"),
8126                                 Err(e) => panic!("Unexpected Error {:?}", e),
8127                         }
8128                 }
8129                 _ => panic!("Unexpected event"),
8130         }
8131
8132         // Ensure that the channel wasn't closed after attempting to accept it twice.
8133         let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8134         assert_eq!(accept_msg_ev.len(), 1);
8135
8136         match accept_msg_ev[0] {
8137                 MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
8138                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8139                 }
8140                 _ => panic!("Unexpected event"),
8141         }
8142 }
8143
8144 #[test]
8145 fn test_can_not_accept_unknown_inbound_channel() {
8146         let chanmon_cfg = create_chanmon_cfgs(2);
8147         let node_cfg = create_node_cfgs(2, &chanmon_cfg);
8148         let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
8149         let nodes = create_network(2, &node_cfg, &node_chanmgr);
8150
8151         let unknown_channel_id = ChannelId::new_zero();
8152         let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0);
8153         match api_res {
8154                 Err(APIError::APIMisuseError { err }) => {
8155                         assert_eq!(err, "No such channel awaiting to be accepted.");
8156                 },
8157                 Ok(_) => panic!("It shouldn't be possible to accept an unknown channel"),
8158                 Err(e) => panic!("Unexpected Error: {:?}", e),
8159         }
8160 }
8161
8162 #[test]
8163 fn test_onion_value_mpp_set_calculation() {
8164         // Test that we use the onion value `amt_to_forward` (rather than the amount
8165         // actually received in the HTLC) when calculating whether we've reached the
8166         // `total_msat` of an MPP, by having a routing node forward more than
8167         // `amt_to_forward` and checking that the receiving node doesn't generate a
8168         // PaymentClaimable event too early. (A sketch of this tally rule follows the test.)
8169         let node_count = 4;
8170         let chanmon_cfgs = create_chanmon_cfgs(node_count);
8171         let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
8172         let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
8173         let mut nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
8174
8175         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8176         let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
8177         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
8178         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
8179
8180         let total_msat = 100_000;
8181         let expected_paths: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]];
8182         let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], total_msat);
8183         let sample_path = route.paths.pop().unwrap();
8184
8185         let mut path_1 = sample_path.clone();
8186         path_1.hops[0].pubkey = nodes[1].node.get_our_node_id();
8187         path_1.hops[0].short_channel_id = chan_1_id;
8188         path_1.hops[1].pubkey = nodes[3].node.get_our_node_id();
8189         path_1.hops[1].short_channel_id = chan_3_id;
8190         path_1.hops[1].fee_msat = 100_000;
8191         route.paths.push(path_1);
8192
8193         let mut path_2 = sample_path.clone();
8194         path_2.hops[0].pubkey = nodes[2].node.get_our_node_id();
8195         path_2.hops[0].short_channel_id = chan_2_id;
8196         path_2.hops[1].pubkey = nodes[3].node.get_our_node_id();
8197         path_2.hops[1].short_channel_id = chan_4_id;
8198         path_2.hops[1].fee_msat = 1_000;
8199         route.paths.push(path_2);
8200
8201         // Send payment
8202         let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
8203         let onion_session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
8204                 RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap();
8205         nodes[0].node.test_send_payment_internal(&route, our_payment_hash,
8206                 RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
8207         check_added_monitors!(nodes[0], expected_paths.len());
8208
8209         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8210         assert_eq!(events.len(), expected_paths.len());
8211
8212         // First path
8213         let ev = remove_first_msg_event_to_node(&expected_paths[0][0].node.get_our_node_id(), &mut events);
8214         let mut payment_event = SendEvent::from_event(ev);
8215         let mut prev_node = &nodes[0];
8216
8217         for (idx, &node) in expected_paths[0].iter().enumerate() {
8218                 assert_eq!(node.node.get_our_node_id(), payment_event.node_id);
8219
8220                 if idx == 0 { // routing node
8221                         let session_priv = [3; 32];
8222                         let height = nodes[0].best_block_info().1;
8223                         let session_priv = SecretKey::from_slice(&session_priv).unwrap();
8224                         let mut onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
8225                         let (mut onion_payloads, _, _) = onion_utils::build_onion_payloads(&route.paths[0], 100_000,
8226                                 RecipientOnionFields::secret_only(our_payment_secret), height + 1, &None).unwrap();
8227                         // Edit amt_to_forward to simulate the sender having set
8228                         // the final amount and the routing node taking less fee
8229                         if let msgs::OutboundOnionPayload::Receive {
8230                                 ref mut sender_intended_htlc_amt_msat, ..
8231                         } = onion_payloads[1] {
8232                                 *sender_intended_htlc_amt_msat = 99_000;
8233                         } else { panic!() }
8234                         let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
8235                         payment_event.msgs[0].onion_routing_packet = new_onion_packet;
8236                 }
8237
8238                 node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]);
8239                 check_added_monitors!(node, 0);
8240                 commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
8241                 expect_pending_htlcs_forwardable!(node);
8242
8243                 if idx == 0 {
8244                         let mut events_2 = node.node.get_and_clear_pending_msg_events();
8245                         assert_eq!(events_2.len(), 1);
8246                         check_added_monitors!(node, 1);
8247                         payment_event = SendEvent::from_event(events_2.remove(0));
8248                         assert_eq!(payment_event.msgs.len(), 1);
8249                 } else {
8250                         let events_2 = node.node.get_and_clear_pending_events();
8251                         assert!(events_2.is_empty());
8252                 }
8253
8254                 prev_node = node;
8255         }
8256
8257         // Second path
8258         let ev = remove_first_msg_event_to_node(&expected_paths[1][0].node.get_our_node_id(), &mut events);
8259         pass_along_path(&nodes[0], expected_paths[1], 101_000, our_payment_hash.clone(), Some(our_payment_secret), ev, true, None);
8260
8261         claim_payment_along_route(&nodes[0], expected_paths, false, our_payment_preimage);
8262 }
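
// A minimal sketch (hypothetical helper, not LDK API) of the receiver-side rule exercised above:
// HTLCs count toward `total_msat` by the sender-intended amount carried in the onion, not by the
// (possibly larger) value of the HTLC that actually arrived.
fn mpp_total_reached_sketch(onion_intended_amts_msat: &[u64], total_msat: u64) -> bool {
	onion_intended_amts_msat.iter().sum::<u64>() >= total_msat
}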
8263
8264 fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) {
8265
8266         let routing_node_count = msat_amounts.len();
8267         let node_count = routing_node_count + 2;
8268
8269         let chanmon_cfgs = create_chanmon_cfgs(node_count);
8270         let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
8271         let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
8272         let nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
8273
8274         let src_idx = 0;
8275         let dst_idx = 1;
8276
8277         // Create channels for each amount
8278         let mut expected_paths = Vec::with_capacity(routing_node_count);
8279         let mut src_chan_ids = Vec::with_capacity(routing_node_count);
8280         let mut dst_chan_ids = Vec::with_capacity(routing_node_count);
8281         for i in 0..routing_node_count {
8282                 let routing_node = 2 + i;
8283                 let src_chan_id = create_announced_chan_between_nodes(&nodes, src_idx, routing_node).0.contents.short_channel_id;
8284                 src_chan_ids.push(src_chan_id);
8285                 let dst_chan_id = create_announced_chan_between_nodes(&nodes, routing_node, dst_idx).0.contents.short_channel_id;
8286                 dst_chan_ids.push(dst_chan_id);
8287                 let path = vec![&nodes[routing_node], &nodes[dst_idx]];
8288                 expected_paths.push(path);
8289         }
8290         let expected_paths: Vec<&[&Node]> = expected_paths.iter().map(|route| route.as_slice()).collect();
8291
8292         // Create a route for each amount
8293         let example_amount = 100000;
8294         let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[src_idx], nodes[dst_idx], example_amount);
8295         let sample_path = route.paths.pop().unwrap();
8296         for i in 0..routing_node_count {
8297                 let routing_node = 2 + i;
8298                 let mut path = sample_path.clone();
8299                 path.hops[0].pubkey = nodes[routing_node].node.get_our_node_id();
8300                 path.hops[0].short_channel_id = src_chan_ids[i];
8301                 path.hops[1].pubkey = nodes[dst_idx].node.get_our_node_id();
8302                 path.hops[1].short_channel_id = dst_chan_ids[i];
8303                 path.hops[1].fee_msat = msat_amounts[i];
8304                 route.paths.push(path);
8305         }
8306
8307         // Send payment with manually set total_msat
8308         let payment_id = PaymentId(nodes[src_idx].keys_manager.backing.get_secure_random_bytes());
8309         let onion_session_privs = nodes[src_idx].node.test_add_new_pending_payment(our_payment_hash,
8310                 RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap();
8311         nodes[src_idx].node.test_send_payment_internal(&route, our_payment_hash,
8312                 RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
8313         check_added_monitors!(nodes[src_idx], expected_paths.len());
8314
8315         let mut events = nodes[src_idx].node.get_and_clear_pending_msg_events();
8316         assert_eq!(events.len(), expected_paths.len());
8317         let mut amount_received = 0;
8318         for (path_idx, expected_path) in expected_paths.iter().enumerate() {
8319                 let ev = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &mut events);
8320
8321                 let current_path_amount = msat_amounts[path_idx];
8322                 amount_received += current_path_amount;
8323                 let became_claimable_now = amount_received >= total_msat && amount_received - current_path_amount < total_msat;
8324                 pass_along_path(&nodes[src_idx], expected_path, amount_received, our_payment_hash.clone(), Some(our_payment_secret), ev, became_claimable_now, None);
8325         }
8326
8327         claim_payment_along_route(&nodes[src_idx], &expected_paths, false, our_payment_preimage);
8328 }
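
// The claimability transition used above, restated as a standalone predicate (hypothetical
// helper): a path makes the payment claimable exactly when the running total first crosses
// `total_msat`.
fn became_claimable_now_sketch(amount_received: u64, current_path_amount: u64, total_msat: u64) -> bool {
	amount_received >= total_msat && amount_received - current_path_amount < total_msat
}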
8329
8330 #[test]
8331 fn test_overshoot_mpp() {
8332         do_test_overshoot_mpp(&[100_000, 101_000], 200_000);
8333         do_test_overshoot_mpp(&[100_000, 10_000, 100_000], 200_000);
8334 }
8335
8336 #[test]
8337 fn test_simple_mpp() {
8338         // Simple test of sending a multi-path payment.
8339         let chanmon_cfgs = create_chanmon_cfgs(4);
8340         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
8341         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
8342         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
8343
8344         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8345         let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
8346         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
8347         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
8348
8349         let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
8350         let path = route.paths[0].clone();
8351         route.paths.push(path);
8352         route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
8353         route.paths[0].hops[0].short_channel_id = chan_1_id;
8354         route.paths[0].hops[1].short_channel_id = chan_3_id;
8355         route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
8356         route.paths[1].hops[0].short_channel_id = chan_2_id;
8357         route.paths[1].hops[1].short_channel_id = chan_4_id;
8358         send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
8359         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
8360 }
8361
8362 #[test]
8363 fn test_preimage_storage() {
8364         // Simple test of payment preimage storage allowing no client-side storage to claim payments
8365         let chanmon_cfgs = create_chanmon_cfgs(2);
8366         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8367         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8368         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8369
8370         create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8371
8372         {
8373                 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200, None).unwrap();
8374                 let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
8375                 nodes[0].node.send_payment_with_route(&route, payment_hash,
8376                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
8377                 check_added_monitors!(nodes[0], 1);
8378                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8379                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
8380                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8381                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8382         }
8383         // Note that after leaving the above scope we have no knowledge of any arguments or return
8384         // values from previous calls.
8385         expect_pending_htlcs_forwardable!(nodes[1]);
8386         let events = nodes[1].node.get_and_clear_pending_events();
8387         assert_eq!(events.len(), 1);
8388         match events[0] {
8389                 Event::PaymentClaimable { ref purpose, .. } => {
8390                         match &purpose {
8391                                 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, .. } => {
8392                                         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap());
8393                                 },
8394                                 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
8395                         }
8396                 },
8397                 _ => panic!("Unexpected event"),
8398         }
8399 }
8400
8401 #[test]
8402 fn test_bad_secret_hash() {
8403         // Simple test of unregistered payment hash/invalid payment secret handling
8404         let chanmon_cfgs = create_chanmon_cfgs(2);
8405         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8406         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8407         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8408
8409         create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8410
8411         let random_payment_hash = PaymentHash([42; 32]);
8412         let random_payment_secret = PaymentSecret([43; 32]);
8413         let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2, None).unwrap();
8414         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
8415
8416         // All the below cases should end up being handled exactly identically, so we macro the
8417         // resulting events.
8418         macro_rules! handle_unknown_invalid_payment_data {
8419                 ($payment_hash: expr) => {
8420                         check_added_monitors!(nodes[0], 1);
8421                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8422                         let payment_event = SendEvent::from_event(events.pop().unwrap());
8423                         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8424                         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8425
8426                         // We have to forward pending HTLCs once to process the receipt of the HTLC and then
8427                         // again to process the pending backwards-failure of the HTLC
8428                         expect_pending_htlcs_forwardable!(nodes[1]);
8429                         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment{ payment_hash: $payment_hash }]);
8430                         check_added_monitors!(nodes[1], 1);
8431
8432                         // We should fail the payment back
8433                         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
8434                         match events.pop().unwrap() {
8435                                 MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } } => {
8436                                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
8437                                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
8438                                 },
8439                                 _ => panic!("Unexpected event"),
8440                         }
8441                 }
8442         }
8443
8444         let expected_error_code = 0x4000|15; // incorrect_or_unknown_payment_details
8445         // Error data is the HTLC value (100,000 msat, 8 bytes big-endian) followed by the current block height (4 bytes big-endian); a sketch of this layout follows the test
8446         let expected_error_data = [0, 0, 0, 0, 0, 1, 0x86, 0xa0, 0, 0, 0, CHAN_CONFIRM_DEPTH as u8];
8447
8448         // Send a payment with the right payment hash but the wrong payment secret
8449         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
8450                 RecipientOnionFields::secret_only(random_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
8451         handle_unknown_invalid_payment_data!(our_payment_hash);
8452         expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data);
8453
8454         // Send a payment with a random payment hash, but the right payment secret
8455         nodes[0].node.send_payment_with_route(&route, random_payment_hash,
8456                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(random_payment_hash.0)).unwrap();
8457         handle_unknown_invalid_payment_data!(random_payment_hash);
8458         expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8459
8460         // Send a payment with a random payment hash and random payment secret
8461         nodes[0].node.send_payment_with_route(&route, random_payment_hash,
8462                 RecipientOnionFields::secret_only(random_payment_secret), PaymentId(random_payment_hash.0)).unwrap();
8463         handle_unknown_invalid_payment_data!(random_payment_hash);
8464         expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8465 }
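
// A minimal sketch (hypothetical helper) of the failure data layout asserted above for
// incorrect_or_unknown_payment_details (0x4000|15): the 8-byte big-endian HTLC amount in msat
// followed by the 4-byte big-endian block height.
fn unknown_payment_failure_data_sketch(htlc_msat: u64, height: u32) -> [u8; 12] {
	let mut data = [0u8; 12];
	data[..8].copy_from_slice(&htlc_msat.to_be_bytes());
	data[8..].copy_from_slice(&height.to_be_bytes());
	// For a 100,000 msat HTLC this yields [0, 0, 0, 0, 0, 1, 0x86, 0xa0, ..], matching
	// `expected_error_data` above.
	data
}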
8466
8467 #[test]
8468 fn test_update_err_monitor_lockdown() {
8469         // Our monitor will lock updates of the local commitment transaction if a broadcast condition
8470         // has been fulfilled (either a force-close from Channel or a block height requiring an HTLC-
8471         // timeout). Trying to update the monitor after lockdown should return a ChannelMonitorUpdateStatus
8472         // error.
8473         //
8474         // This scenario may happen in a watchtower setup, where the watchtower processes a block height
8475         // triggering a timeout while a slow-block-processing ChannelManager receives a locally signed
8476         // commitment at the same time.
8477
8478         let chanmon_cfgs = create_chanmon_cfgs(2);
8479         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8480         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8481         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8482
8483         // Create some initial channel
8484         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
8485         let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
8486
8487         // Rebalance the network so HTLCs can be routed in both directions
8488         send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
8489
8490         // Route a HTLC from node 0 to node 1 (but don't settle)
8491         let (preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
8492
8493         // Copy node 0's ChainMonitor to simulate a watchtower and advance its block height until its ChannelMonitor times out the HTLC on-chain
8494         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8495         let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
8496         let persister = test_utils::TestPersister::new();
8497         let watchtower = {
8498                 let new_monitor = {
8499                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8500                         let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
8501                                         &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8502                         assert!(new_monitor == *monitor);
8503                         new_monitor
8504                 };
8505                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8506                 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
8507                 watchtower
8508         };
8509         let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
8510         // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
8511         // transaction lock time requirements here.
8512         chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (block.clone(), 200));
8513         watchtower.chain_monitor.block_connected(&block, 200);
8514
8515         // Try to update ChannelMonitor
8516         nodes[1].node.claim_funds(preimage);
8517         check_added_monitors!(nodes[1], 1);
8518         expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
8519
8520         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8521         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
8522         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
8523         {
8524                 let mut node_0_per_peer_lock;
8525                 let mut node_0_peer_state_lock;
8526                 if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
8527                         if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
8528                                 assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);
8529                                 assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8530                         } else { assert!(false); }
8531                 } else {
8532                         assert!(false);
8533                 }
8534         }
8535         // Our local monitor is in-sync and hasn't yet processed the timeout
8536         check_added_monitors!(nodes[0], 1);
8537         let events = nodes[0].node.get_and_clear_pending_events();
8538         assert_eq!(events.len(), 1);
8539 }
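
// The invariant exercised above, restated: once a ChannelMonitor has hit a broadcast condition
// (here, the watchtower's copy saw a block height requiring an HTLC-timeout), it refuses a later
// commitment update (`InProgress` from the watchtower), while the in-sync local monitor, which
// hasn't connected those blocks, accepts it (`Completed`).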
8540
8541 #[test]
8542 fn test_concurrent_monitor_claim() {
8543         // Watchtower Alice receives a block and broadcasts state N, then the channel receives a new
8544         // state N+1, which is sent to both watchtowers. Bob accepts N+1, then receives a block and
8545         // broadcasts the latest state N+1. Alice rejects state N+1, but Bob has already broadcast it,
8546         // and state N+1 confirms. Alice then claims the output from state N+1.
8547
8548         let chanmon_cfgs = create_chanmon_cfgs(2);
8549         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8550         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8551         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8552
8553         // Create some initial channel
8554         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
8555         let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
8556
8557         // Rebalance the network so HTLCs can be routed in both directions
8558         send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
8559
8560         // Route a HTLC from node 0 to node 1 (but don't settle)
8561         route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
8562
8563         // Copy node 0's ChainMonitor to simulate watchtower Alice and advance her block height until her ChannelMonitor times out the HTLC on-chain
8564         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8565         let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
8566         let persister = test_utils::TestPersister::new();
8567         let alice_broadcaster = test_utils::TestBroadcaster::with_blocks(
8568                 Arc::new(Mutex::new(nodes[0].blocks.lock().unwrap().clone())),
8569         );
8570         let watchtower_alice = {
8571                 let new_monitor = {
8572                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8573                         let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
8574                                         &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8575                         assert!(new_monitor == *monitor);
8576                         new_monitor
8577                 };
8578                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &alice_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8579                 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
8580                 watchtower
8581         };
8582         let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
8583         // Make Alice aware of enough blocks that it doesn't think we're violating transaction lock time
8584         // requirements here.
8585         const HTLC_TIMEOUT_BROADCAST: u32 = CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS;
8586         alice_broadcaster.blocks.lock().unwrap().resize((HTLC_TIMEOUT_BROADCAST) as usize, (block.clone(), HTLC_TIMEOUT_BROADCAST));
8587         watchtower_alice.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST);
8588
8589         // Watchtower Alice should have broadcast a commitment/HTLC-timeout
8590         {
8591                 let mut txn = alice_broadcaster.txn_broadcast();
8592                 assert_eq!(txn.len(), 2);
8593                 check_spends!(txn[0], chan_1.3);
8594                 check_spends!(txn[1], txn[0]);
8595         };
8596
8597         // Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first.
8598         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8599         let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
8600         let persister = test_utils::TestPersister::new();
8601         let bob_broadcaster = test_utils::TestBroadcaster::with_blocks(Arc::clone(&alice_broadcaster.blocks));
8602         let watchtower_bob = {
8603                 let new_monitor = {
8604                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8605                         let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
8606                                         &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8607                         assert!(new_monitor == *monitor);
8608                         new_monitor
8609                 };
8610                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &bob_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8611                 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
8612                 watchtower
8613         };
8614         watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST - 1);
8615
8616         // Route another payment to generate another update with the previous HTLC still pending
8617         let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 3000000);
8618         nodes[1].node.send_payment_with_route(&route, payment_hash,
8619                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
8620         check_added_monitors!(nodes[1], 1);
8621
8622         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8623         assert_eq!(updates.update_add_htlcs.len(), 1);
8624         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]);
8625         {
8626                 let mut node_0_per_peer_lock;
8627                 let mut node_0_peer_state_lock;
8628                 if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
8629                         if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
8630                                 // Watchtower Alice should already have seen the block and reject the update
8631                                 assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);
8632                                 assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8633                                 assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8634                         } else { assert!(false); }
8635                 } else {
8636                         assert!(false);
8637                 }
8638         }
8639         // Our local monitor is in-sync and hasn't yet processed the timeout
8640         check_added_monitors!(nodes[0], 1);
8641
8642         // Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout
8643         watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST);
8644
8645         // Watchtower Bob should have broadcast a commitment/HTLC-timeout
8646         let bob_state_y;
8647         {
8648                 let mut txn = bob_broadcaster.txn_broadcast();
8649                 assert_eq!(txn.len(), 2);
8650                 bob_state_y = txn.remove(0);
8651         };
8652
8653         // We confirm Bob's state Y on Alice; she should broadcast an HTLC-timeout
8654         let height = HTLC_TIMEOUT_BROADCAST + 1;
8655         connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
8656         check_closed_broadcast(&nodes[0], 1, true);
8657         check_closed_event!(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false,
8658                 [nodes[1].node.get_our_node_id()], 100000);
8659         watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
8660         check_added_monitors(&nodes[0], 1);
8661         {
8662                 let htlc_txn = alice_broadcaster.txn_broadcast();
8663                 assert_eq!(htlc_txn.len(), 1);
8664                 check_spends!(htlc_txn[0], bob_state_y);
8665         }
8666 }
8667
8668 #[test]
8669 fn test_pre_lockin_no_chan_closed_update() {
8670         // Test that if a peer closes a channel in response to a funding_created message we don't
8671         // generate a channel update (as the channel cannot appear on chain without a funding_signed
8672         // message).
8673         //
8674         // Doing so would imply a channel monitor update before the initial channel monitor
8675         // registration, violating our API guarantees.
8676         //
8677         // Previously, full_stack_target managed to hit this case by opening then closing a channel,
8678         // then opening a second channel with the same funding output as the first (which is not
8679         // rejected because the first channel does not exist in the ChannelManager) and closing it
8680         // before receiving funding_signed.
8681         let chanmon_cfgs = create_chanmon_cfgs(2);
8682         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8683         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8684         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8685
8686         // Create an initial channel
8687         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
8688         let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8689         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
8690         let accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
8691         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
8692
8693         // Move the first channel through the funding flow...
8694         let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
8695
8696         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
8697         check_added_monitors!(nodes[0], 0);
8698
8699         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
8700         let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
8701         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
8702         assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
8703         check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
8704                 [nodes[1].node.get_our_node_id()], 100000);
8705 }
8706
8707 #[test]
8708 fn test_htlc_no_detection() {
8709         // This test is a mutation to underscore the detection logic bug we had
8710         // before #653. The HTLC value routed is above the remaining balance, thus
8711         // inverting the HTLC and `to_remote` outputs. The HTLC will come second and
8712         // wouldn't be seen by pre-#653 detection, as we were enumerate()'ing
8713         // over a watched outputs vector (Vec<TxOut>), thus implicitly relying on
8714         // output order for correct filtering of spending children. (A sketch of order-independent detection follows the test.)
8715
8716         let chanmon_cfgs = create_chanmon_cfgs(2);
8717         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8718         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8719         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8720
8721         // Create some initial channels
8722         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8723
8724         send_payment(&nodes[0], &vec!(&nodes[1])[..], 1_000_000);
8725         let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
8726         let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
8727         assert_eq!(local_txn[0].input.len(), 1);
8728         assert_eq!(local_txn[0].output.len(), 3);
8729         check_spends!(local_txn[0], chan_1.3);
8730
8731         // Time out the HTLC on A's chain so that it can generate an HTLC-Timeout tx
8732         let block = create_dummy_block(nodes[0].best_block_hash(), 42, vec![local_txn[0].clone()]);
8733         connect_block(&nodes[0], &block);
8734         // We deliberately connect the local tx twice, as doing so provoked a failure when running
8735         // this test before the #653 fix.
8736         chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &block, nodes[0].best_block_info().1 + 1);
8737         check_closed_broadcast!(nodes[0], true);
8738         check_added_monitors!(nodes[0], 1);
8739         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
8740         connect_blocks(&nodes[0], TEST_FINAL_CLTV);
8741
8742         let htlc_timeout = {
8743                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8744                 assert_eq!(node_txn.len(), 1);
8745                 assert_eq!(node_txn[0].input.len(), 1);
8746                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
8747                 check_spends!(node_txn[0], local_txn[0]);
8748                 node_txn[0].clone()
8749         };
8750
8751         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![htlc_timeout.clone()]));
8752         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
8753         expect_payment_failed!(nodes[0], our_payment_hash, false);
8754 }
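
// A hedged sketch (hypothetical shape, not the actual #653 patch) of the order-independent spend
// detection the comment above alludes to: match spending inputs against watched outpoints rather
// than assuming the HTLC output's index in the commitment transaction.
//
//	let spends_watched = tx.input.iter().any(|input| input.previous_output == watched_outpoint);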
8755
8756 fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain_before_fulfill: bool) {
8757         // If we route an HTLC, then learn the HTLC's preimage after the upstream channel has been
8758         // force-closed, we must claim that HTLC on-chain. (Given an HTLC forwarded from Alice --> Bob -->
8759         // Carol, Alice would be the upstream node, and Carol the downstream.)
8760         //
8761         // Steps of the test:
8762         // 1) Alice sends a HTLC to Carol through Bob.
8763         // 2) Carol doesn't settle the HTLC.
8764         // 3) If broadcast_alice is true, Alice force-closes her channel with Bob. Else Bob force closes.
8765         // Steps 4 and 5 may be reordered depending on go_onchain_before_fulfill.
8766         // 4) Bob sees Alice's commitment on his chain (or vice versa). An offered output is present
8767         //    but can't be claimed, as Bob doesn't yet know the preimage.
8768         // 5) Carol releases the preimage to Bob off-chain.
8769         // 6) Bob claims the offered output on the broadcasted commitment.
8770         let chanmon_cfgs = create_chanmon_cfgs(3);
8771         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
8772         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
8773         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
8774
8775         // Create some initial channels
8776         let chan_ab = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8777         create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 10001);
8778
8779         // Steps (1) and (2):
8780         // Send an HTLC Alice --> Bob --> Carol, but Carol doesn't settle the HTLC back.
8781         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
8782
8783         // Check that Alice's commitment transaction now contains an output for this HTLC.
8784         let alice_txn = get_local_commitment_txn!(nodes[0], chan_ab.2);
8785         check_spends!(alice_txn[0], chan_ab.3);
8786         assert_eq!(alice_txn[0].output.len(), 2);
8787         check_spends!(alice_txn[1], alice_txn[0]); // 2nd transaction is a non-final HTLC-timeout
8788         assert_eq!(alice_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
8789         assert_eq!(alice_txn.len(), 2);
8790
8791         // Steps (3) and (4):
8792         // If `go_onchain_before_fulfill`, broadcast the relevant commitment transaction and check that Bob
8793         // responds by (1) broadcasting a channel update and (2) adding a new ChannelMonitor.
8794         let mut force_closing_node = 0; // Alice force-closes
8795         let mut counterparty_node = 1; // Bob if Alice force-closes
8796
8797         // Bob force-closes
8798         if !broadcast_alice {
8799                 force_closing_node = 1;
8800                 counterparty_node = 0;
8801         }
8802         nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
8803         check_closed_broadcast!(nodes[force_closing_node], true);
8804         check_added_monitors!(nodes[force_closing_node], 1);
8805         check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed, [nodes[counterparty_node].node.get_our_node_id()], 100000);
8806         if go_onchain_before_fulfill {
8807                 let txn_to_broadcast = match broadcast_alice {
8808                         true => alice_txn.clone(),
8809                         false => get_local_commitment_txn!(nodes[1], chan_ab.2)
8810                 };
8811                 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
8812                 if broadcast_alice {
8813                         check_closed_broadcast!(nodes[1], true);
8814                         check_added_monitors!(nodes[1], 1);
8815                         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
8816                 }
8817         }
8818
8819         // Step (5):
8820         // Carol then claims the funds and sends an update_fulfill message to Bob, and they go through the
8821         // process of removing the HTLC from their commitment transactions.
8822         nodes[2].node.claim_funds(payment_preimage);
8823         check_added_monitors!(nodes[2], 1);
8824         expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
8825
8826         let carol_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
8827         assert!(carol_updates.update_add_htlcs.is_empty());
8828         assert!(carol_updates.update_fail_htlcs.is_empty());
8829         assert!(carol_updates.update_fail_malformed_htlcs.is_empty());
8830         assert!(carol_updates.update_fee.is_none());
8831         assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
8832
8833         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
8834         let went_onchain = go_onchain_before_fulfill || force_closing_node == 1;
8835         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if went_onchain { None } else { Some(1000) }, went_onchain, false);
8836         // If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
8837         if !go_onchain_before_fulfill && broadcast_alice {
8838                 let events = nodes[1].node.get_and_clear_pending_msg_events();
8839                 assert_eq!(events.len(), 1);
8840                 match events[0] {
8841                         MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
8842                                 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8843                         },
8844                         _ => panic!("Unexpected event"),
8845                 };
8846         }
8847         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &carol_updates.commitment_signed);
8848         // One monitor update for the preimage to update the Bob<->Alice channel, one monitor update
8849         // for Carol<->Bob's updated commitment transaction info.
8850         check_added_monitors!(nodes[1], 2);
8851
8852         let events = nodes[1].node.get_and_clear_pending_msg_events();
8853         assert_eq!(events.len(), 2);
8854         let bob_revocation = match events[0] {
8855                 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
8856                         assert_eq!(*node_id, nodes[2].node.get_our_node_id());
8857                         (*msg).clone()
8858                 },
8859                 _ => panic!("Unexpected event"),
8860         };
8861         let bob_updates = match events[1] {
8862                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
8863                         assert_eq!(*node_id, nodes[2].node.get_our_node_id());
8864                         (*updates).clone()
8865                 },
8866                 _ => panic!("Unexpected event"),
8867         };
8868
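             // Complete the commitment dance: Bob sends his revoke_and_ack and commitment_signed
             // to Carol, who revokes her prior state in turn, fully removing the HTLC from the
             // Bob<->Carol channel.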
8869         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revocation);
8870         check_added_monitors!(nodes[2], 1);
8871         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_updates.commitment_signed);
8872         check_added_monitors!(nodes[2], 1);
8873
8874         let events = nodes[2].node.get_and_clear_pending_msg_events();
8875         assert_eq!(events.len(), 1);
8876         let carol_revocation = match events[0] {
8877                 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
8878                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
8879                         (*msg).clone()
8880                 },
8881                 _ => panic!("Unexpected event"),
8882         };
8883         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &carol_revocation);
8884         check_added_monitors!(nodes[1], 1);
8885
8886         // If this test requires the force-closed channel to not be on-chain until after the fulfill,
8887         // here's where we put said channel's commitment tx on-chain.
8888         let mut txn_to_broadcast = alice_txn.clone();
8889         if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); }
8890         if !go_onchain_before_fulfill {
8891                 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
8892                 // If Bob was the one to force-close, he will have already passed these checks earlier.
8893                 if broadcast_alice {
8894                         check_closed_broadcast!(nodes[1], true);
8895                         check_added_monitors!(nodes[1], 1);
8896                         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
8897                 }
8898                 let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
8899                 if broadcast_alice {
8900                         assert_eq!(bob_txn.len(), 1);
8901                         check_spends!(bob_txn[0], txn_to_broadcast[0]);
8902                 } else {
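                             // When the connect style processes the new best block before the
                             // transactions within it, Bob re-broadcasts his own commitment
                             // transaction upon seeing the new tip, so it appears twice here.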
8903                         if nodes[1].connect_style.borrow().updates_best_block_first() {
8904                                 assert_eq!(bob_txn.len(), 3);
8905                                 assert_eq!(bob_txn[0].txid(), bob_txn[1].txid());
8906                         } else {
8907                                 assert_eq!(bob_txn.len(), 2);
8908                         }
8909                         check_spends!(bob_txn[0], chan_ab.3);
8910                 }
8911         }
8912
8913         // Step (6):
8914         // Finally, check that Bob broadcasted a preimage-claiming transaction for the HTLC output on the
8915         // broadcasted commitment transaction.
8916         {
8917                 let script_weight = match broadcast_alice {
8918                         true => OFFERED_HTLC_SCRIPT_WEIGHT,
8919                         false => ACCEPTED_HTLC_SCRIPT_WEIGHT
8920                 };
8921                 // If Alice force-closed, Bob only broadcasts a HTLC-output-claiming transaction. Otherwise,
8922                 // Bob force-closed and broadcasts the commitment transaction along with a
8923                 // HTLC-output-claiming transaction.
8924                 let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
8925                 if broadcast_alice {
8926                         assert_eq!(bob_txn.len(), 1);
8927                         check_spends!(bob_txn[0], txn_to_broadcast[0]);
8928                         assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
8929                 } else {
8930                         assert_eq!(bob_txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 3 } else { 2 });
8931                         let htlc_tx = bob_txn.pop().unwrap();
8932                         check_spends!(htlc_tx, txn_to_broadcast[0]);
8933                         assert_eq!(htlc_tx.input[0].witness.last().unwrap().len(), script_weight);
8934                 }
8935         }
8936 }
8937
8938 #[test]
8939 fn test_onchain_htlc_settlement_after_close() {
8940         do_test_onchain_htlc_settlement_after_close(true, true);
8941         do_test_onchain_htlc_settlement_after_close(false, true); // Technically redundant, but may as well
8942         do_test_onchain_htlc_settlement_after_close(true, false);
8943         do_test_onchain_htlc_settlement_after_close(false, false);
8944 }
8945
8946 #[test]
8947 fn test_duplicate_temporary_channel_id_from_different_peers() {
8948         // Tests that we can accept two different `OpenChannel` requests with the same
8949         // `temporary_channel_id`, as long as they are from different peers.
8950         let chanmon_cfgs = create_chanmon_cfgs(3);
8951         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
8952         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
8953         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
8954
8955         // Create the first channel
8956         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
8957         let mut open_chan_msg_chan_1_0 = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
8958
8959         // Create a second channel
8960         nodes[2].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap();
8961         let mut open_chan_msg_chan_2_0 = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
8962
8963         // Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same
8964         // `temporary_channel_id` as the `OpenChannel` from nodes[1] to nodes[0].
8965         open_chan_msg_chan_2_0.common_fields.temporary_channel_id = open_chan_msg_chan_1_0.common_fields.temporary_channel_id;
8966
8967         // Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same
8968         // `temporary_channel_id` as they are from different peers.
8969         nodes[0].node.handle_open_channel(&nodes[1].node.get_our_node_id(), &open_chan_msg_chan_1_0);
8970         {
8971                 let events = nodes[0].node.get_and_clear_pending_msg_events();
8972                 assert_eq!(events.len(), 1);
8973                 match &events[0] {
8974                         MessageSendEvent::SendAcceptChannel { node_id, msg } => {
8975                                 assert_eq!(node_id, &nodes[1].node.get_our_node_id());
8976                                 assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
8977                         },
8978                         _ => panic!("Unexpected event"),
8979                 }
8980         }
8981
8982         nodes[0].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg_chan_2_0);
8983         {
8984                 let events = nodes[0].node.get_and_clear_pending_msg_events();
8985                 assert_eq!(events.len(), 1);
8986                 match &events[0] {
8987                         MessageSendEvent::SendAcceptChannel { node_id, msg } => {
8988                                 assert_eq!(node_id, &nodes[2].node.get_our_node_id());
8989                                 assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
8990                         },
8991                         _ => panic!("Unexpected event"),
8992                 }
8993         }
8994 }
8995
8996 #[test]
8997 fn test_peer_funding_sidechannel() {
8998         // Test that if a peer somehow learns which txid we'll use for our channel funding before we
8999         // receive `funding_transaction_generated` the peer cannot cause us to crash. We'd previously
9000         // assumed that LDK would receive `funding_transaction_generated` prior to our peer learning
9001         // the txid and panicked if the peer tried to open a redundant channel to us with the same
9002         // funding outpoint.
9003         //
9004         // While this assumption is generally safe, some users may have out-of-band protocols where
9005         // they notify their LSP about a funding outpoint first, or this may be violated in the future
9006         // with collaborative transaction construction protocols, i.e. dual-funding.
9007         let chanmon_cfgs = create_chanmon_cfgs(3);
9008         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9009         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9010         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9011
9012         let temp_chan_id_ab = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
9013         let temp_chan_id_ca = exchange_open_accept_chan(&nodes[2], &nodes[0], 1_000_000, 0);
9014
9015         let (_, tx, funding_output) =
9016                 create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
9017
9018         let cs_funding_events = nodes[2].node.get_and_clear_pending_events();
9019         assert_eq!(cs_funding_events.len(), 1);
9020         match cs_funding_events[0] {
9021                 Event::FundingGenerationReady { .. } => {}
9022                 _ => panic!("Unexpected event {:?}", cs_funding_events),
9023         }
9024
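             // Fund Carol's channel to Alice with the same transaction Alice built for her own
             // channel to Bob, simulating a peer which learned our funding txid out-of-band.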
9025         nodes[2].node.funding_transaction_generated_unchecked(&temp_chan_id_ca, &nodes[0].node.get_our_node_id(), tx.clone(), funding_output.index).unwrap();
9026         let funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[0].node.get_our_node_id());
9027         nodes[0].node.handle_funding_created(&nodes[2].node.get_our_node_id(), &funding_created_msg);
9028         get_event_msg!(nodes[0], MessageSendEvent::SendFundingSigned, nodes[2].node.get_our_node_id());
9029         expect_channel_pending_event(&nodes[0], &nodes[2].node.get_our_node_id());
9030         check_added_monitors!(nodes[0], 1);
9031
9032         let res = nodes[0].node.funding_transaction_generated(&temp_chan_id_ab, &nodes[1].node.get_our_node_id(), tx.clone());
9033         let err_msg = format!("{:?}", res.unwrap_err());
9034         assert!(err_msg.contains("An existing channel using outpoint "));
9035         assert!(err_msg.contains(" is open with peer"));
9036         // Even though the last funding_transaction_generated errored, it still generated a
9037         // SendFundingCreated. However, when the peer responds with a funding_signed it will send the
9038         // appropriate error message.
9039         let as_funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9040         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &as_funding_created);
9041         check_added_monitors!(nodes[1], 1);
9042         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9043         let reason = ClosureReason::ProcessingError { err: format!("An existing channel using outpoint {} is open with peer {}", funding_output, nodes[2].node.get_our_node_id()), };
9044         check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(ChannelId::v1_from_funding_outpoint(funding_output), true, reason)]);
9045
9046         let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9047         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
9048         get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
9049 }
9050
9051 #[test]
9052 fn test_duplicate_conflicting_funding_from_second_peer() {
9053         // Test that if a user tries to fund a channel with a funding outpoint they'd previously used
9054         // we don't try to remove the previous ChannelMonitor. This is largely a test to ensure we
9055         // don't regress in the fuzzer, as such funding getting passed our outpoint-matches checks
9056         // implies the user (and our counterparty) has reused cryptographic keys across channels, which
9057         // we require the user not to do.
9058         let chanmon_cfgs = create_chanmon_cfgs(4);
9059         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9060         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9061         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9062
9063         let temp_chan_id = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
9064
9065         let (_, tx, funding_output) =
9066                 create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
9067
9068         // Now that we have a funding outpoint, create a dummy `ChannelMonitor` and insert it into
9069         // nodes[0]'s ChainMonitor so that the initial `ChannelMonitor` write fails.
9070         let dummy_chan_id = create_chan_between_nodes(&nodes[2], &nodes[3]).3;
9071         let dummy_monitor = get_monitor!(nodes[2], dummy_chan_id).clone();
9072         nodes[0].chain_monitor.chain_monitor.watch_channel(funding_output, dummy_monitor).unwrap();
9073
9074         nodes[0].node.funding_transaction_generated(&temp_chan_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9075
9076         let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9077         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
9078         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9079         check_added_monitors!(nodes[1], 1);
9080         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9081
9082         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
9083         // At this point, the channel should be closed, after having generated one monitor write (the
9084         // watch_channel call which failed), but zero monitor updates.
9085         check_added_monitors!(nodes[0], 1);
9086         get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
9087         let err_reason = ClosureReason::ProcessingError { err: "Channel funding outpoint was a duplicate".to_owned() };
9088         check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(funding_signed_msg.channel_id, true, err_reason)]);
9089 }
9090
9091 #[test]
9092 fn test_duplicate_funding_err_in_funding() {
9093         // Test that if we have a live channel with one peer, then another peer comes along and tries
9094         // to create a second channel with the same txid we'll fail and not overwrite the
9095         // outpoint_to_peer map in `ChannelManager`.
9096         //
9097         // This was previously broken.
9098         let chanmon_cfgs = create_chanmon_cfgs(3);
9099         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9100         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9101         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9102
9103         let (_, _, _, real_channel_id, funding_tx) = create_chan_between_nodes(&nodes[0], &nodes[1]);
9104         let real_chan_funding_txo = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 };
9105         assert_eq!(ChannelId::v1_from_funding_outpoint(real_chan_funding_txo), real_channel_id);
9106
9107         nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
9108         let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9109         let node_c_temp_chan_id = open_chan_msg.common_fields.temporary_channel_id;
9110         open_chan_msg.common_fields.temporary_channel_id = real_channel_id;
9111         nodes[1].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg);
9112         let mut accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[2].node.get_our_node_id());
9113         accept_chan_msg.common_fields.temporary_channel_id = node_c_temp_chan_id;
9114         nodes[2].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
9115
9116         // Now that we have a second channel with the same funding txo, send a bogus funding message
9117         // and let nodes[1] remove the inbound channel.
9118         let (_, funding_tx, _) = create_funding_transaction(&nodes[2], &nodes[1].node.get_our_node_id(), 100_000, 42);
9119
9120         nodes[2].node.funding_transaction_generated(&node_c_temp_chan_id, &nodes[1].node.get_our_node_id(), funding_tx).unwrap();
9121
9122         let mut funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9123         funding_created_msg.temporary_channel_id = real_channel_id;
9124         // Make the signature invalid by changing the funding output
9125         funding_created_msg.funding_output_index += 10;
9126         nodes[1].node.handle_funding_created(&nodes[2].node.get_our_node_id(), &funding_created_msg);
9127         get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
9128         let err = "Invalid funding_created signature from peer".to_owned();
9129         let reason = ClosureReason::ProcessingError { err };
9130         let expected_closing = ExpectedCloseEvent::from_id_reason(real_channel_id, false, reason);
9131         check_closed_events(&nodes[1], &[expected_closing]);
9132
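             // The outpoint_to_peer entry for the original channel must still point at nodes[0];
             // nodes[2]'s bogus funding attempt must not have overwritten it.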
9133         assert_eq!(
9134                 *nodes[1].node.outpoint_to_peer.lock().unwrap().get(&real_chan_funding_txo).unwrap(),
9135                 nodes[0].node.get_our_node_id()
9136         );
9137 }
9138
9139 #[test]
9140 fn test_duplicate_chan_id() {
9141         // Test that if a given peer tries to open a channel with the same channel_id as one that is
9142         // already open we reject it and keep the old channel.
9143         //
9144         // Previously, full_stack_target managed to figure out that if you tried to open two channels
9145         // with the same funding output (ie post-funding channel_id), we'd create a monitor update for
9146         // the existing channel when we detect the duplicate new channel, screwing up our monitor
9147         // updating logic for the existing channel.
9148         let chanmon_cfgs = create_chanmon_cfgs(2);
9149         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9150         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9151         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9152
9153         // Create an initial channel
9154         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
9155         let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9156         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
9157         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9158
9159         // Try to create a second channel with the same temporary_channel_id as the first and check
9160         // that it is rejected.
9161         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
9162         {
9163                 let events = nodes[1].node.get_and_clear_pending_msg_events();
9164                 assert_eq!(events.len(), 1);
9165                 match events[0] {
9166                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
9167                                 // Technically, at this point, nodes[1] would be justified in thinking both the
9168                                 // first (valid) and second (invalid) channels are closed, given they both have
9169                                 // the same non-temporary channel_id. However, currently we do not, so we just
9170                                 // move forward with it.
9171                                 assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
9172                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
9173                         },
9174                         _ => panic!("Unexpected event"),
9175                 }
9176         }
9177
9178         // Move the first channel through the funding flow...
9179         let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
9180
9181         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9182         check_added_monitors!(nodes[0], 0);
9183
9184         let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9185         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
9186         {
9187                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
9188                 assert_eq!(added_monitors.len(), 1);
9189                 assert_eq!(added_monitors[0].0, funding_output);
9190                 added_monitors.clear();
9191         }
9192         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9193
9194         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9195
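             // The v1 channel_id is the funding txid with the big-endian funding output index
             // XORed into its final two bytes.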
9196         let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
9197         let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
9198
9199         // Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a
9200         // temporary one).
9201
9202         // First try to open a second channel with a temporary channel id equal to the txid-based one.
9203         // Technically this is allowed by the spec, but we don't support it and there's little reason
9204         // to. Still, it shouldn't cause any other issues.
9205         open_chan_msg.common_fields.temporary_channel_id = channel_id;
9206         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
9207         {
9208                 let events = nodes[1].node.get_and_clear_pending_msg_events();
9209                 assert_eq!(events.len(), 1);
9210                 match events[0] {
9211                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
9212                                 // Technically, at this point, nodes[1] would be justified in thinking both
9213                                 // channels are closed, but currently we do not, so we just move forward with it.
9214                                 assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
9215                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
9216                         },
9217                         _ => panic!("Unexpected event"),
9218                 }
9219         }
9220
9221         // Now try to create a second channel which has a duplicate funding output.
9222         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
9223         let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9224         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_2_msg);
9225         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9226         create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event
9227
9228         let funding_created = {
9229                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
9230                 let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
9231                 // Once we call `get_funding_created` the channel has a duplicate channel_id as
9232                 // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
9233                 // try to create another channel. Instead, we drop the channel entirely here (leaving the
9234                 // ChannelManager in a possibly nonsensical state).
9235                 match a_peer_state.channel_by_id.remove(&open_chan_2_msg.common_fields.temporary_channel_id).unwrap() {
9236                         ChannelPhase::UnfundedOutboundV1(mut chan) => {
9237                                 let logger = test_utils::TestLogger::new();
9238                                 chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap()
9239                         },
9240                         _ => panic!("Unexpected ChannelPhase variant"),
9241                 }.unwrap()
9242         };
9243         check_added_monitors!(nodes[0], 0);
9244         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
9245         // At this point we'll look up if the channel_id is present and immediately fail the channel
9246         // without trying to persist the `ChannelMonitor`.
9247         check_added_monitors!(nodes[1], 0);
9248
9249         check_closed_events(&nodes[1], &[
9250                 ExpectedCloseEvent::from_id_reason(funding_created.temporary_channel_id, false, ClosureReason::ProcessingError {
9251                         err: "Already had channel with the new channel_id".to_owned()
9252                 })
9253         ]);
9254
9255         // ...still, nodes[1] will reject the duplicate channel.
9256         {
9257                 let events = nodes[1].node.get_and_clear_pending_msg_events();
9258                 assert_eq!(events.len(), 1);
9259                 match events[0] {
9260                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
9261                                 // Technically, at this point, nodes[1] would be justified in thinking both
9262                                 // channels are closed, but currently we do not, so we just move forward with it.
9263                                 assert_eq!(msg.channel_id, channel_id);
9264                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
9265                         },
9266                         _ => panic!("Unexpected event"),
9267                 }
9268         }
9269
9270         // Finally, finish creating the original channel and send a payment over it to make sure
9271         // everything is functional.
9272         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
9273         {
9274                 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
9275                 assert_eq!(added_monitors.len(), 1);
9276                 assert_eq!(added_monitors[0].0, funding_output);
9277                 added_monitors.clear();
9278         }
9279         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9280
9281         let events_4 = nodes[0].node.get_and_clear_pending_events();
9282         assert_eq!(events_4.len(), 0);
9283         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
9284         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
9285
9286         let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
9287         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
9288         update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
9289
9290         send_payment(&nodes[0], &[&nodes[1]], 8000000);
9291 }
9292
9293 #[test]
9294 fn test_error_chans_closed() {
9295         // Test that we properly handle error messages, closing appropriate channels.
9296         //
9297         // Prior to #787 we'd allow a peer to make us force-close a channel we had with a different
9298         // peer. The "real" fix for that is to index channels with peers_ids, however in the mean time
9299         // we can test various edge cases around it to ensure we don't regress.
9300         let chanmon_cfgs = create_chanmon_cfgs(3);
9301         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9302         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9303         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9304
9305         // Create some initial channels
9306         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9307         let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9308         let chan_3 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001);
9309
9310         assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
9311         assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
9312         assert_eq!(nodes[2].node.list_usable_channels().len(), 1);
9313
9314         // Closing a channel from a different peer has no effect
9315         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() });
9316         assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
9317
9318         // Closing one channel doesn't impact others
9319         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
9320         check_added_monitors!(nodes[0], 1);
9321         check_closed_broadcast!(nodes[0], false);
9322         check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
9323                 [nodes[1].node.get_our_node_id()], 100000);
9324         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
9325         assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
9326         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
9327         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2);
9328
9329         // A null channel ID should close all channels
9330         let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9331         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: ChannelId::new_zero(), data: "ERR".to_owned() });
9332         check_added_monitors!(nodes[0], 2);
9333         check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
9334                 [nodes[1].node.get_our_node_id(); 2], 100000);
9335         let events = nodes[0].node.get_and_clear_pending_msg_events();
9336         assert_eq!(events.len(), 2);
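             // Both broadcast channel_updates should have the "disabled" bit (0b10 of flags) set.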
9337         match events[0] {
9338                 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
9339                         assert_eq!(msg.contents.flags & 2, 2);
9340                 },
9341                 _ => panic!("Unexpected event"),
9342         }
9343         match events[1] {
9344                 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
9345                         assert_eq!(msg.contents.flags & 2, 2);
9346                 },
9347                 _ => panic!("Unexpected event"),
9348         }
9349         // Note that at this point users of a standard PeerHandler will end up calling
9350         // peer_disconnected.
9351         assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
9352         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
9353
9354         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
9355         assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
9356         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
9357 }
9358
9359 #[test]
9360 fn test_invalid_funding_tx() {
9361         // Test that we properly handle invalid funding transactions sent to us from a peer.
9362         //
9363         // Previously, all other major lightning implementations had failed to properly sanitize
9364         // funding transactions from their counterparties, leading to a multi-implementation critical
9365         // security vulnerability (though we always sanitized properly, we've previously had
9366         // un-released crashes in the sanitization process).
9367         //
9368         // Further, if the funding transaction is consensus-valid, confirms, and is later spent, we'd
9369         // previously have crashed in `ChannelMonitor` even though we closed the channel as bogus and
9370         // gave up on it. We test this here by generating such a transaction.
9371         let chanmon_cfgs = create_chanmon_cfgs(2);
9372         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9373         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9374         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9375
9376         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 10_000, 42, None, None).unwrap();
9377         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
9378         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9379
9380         let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
9381
9382         // Create a witness program which can be spent by a 4-empty-stack-elements witness and which is
9383         // 136 bytes long. This matches our "accepted HTLC preimage spend" matching, previously causing
9384         // a panic as we'd try to extract a 32 byte preimage from a witness element without checking
9385         // its length.
9386         let wit_program: Vec<u8> = channelmonitor::deliberately_bogus_accepted_htlc_witness_program();
9387         let wit_program_script: ScriptBuf = wit_program.into();
9388         for output in tx.output.iter_mut() {
9389                 // Make the confirmed funding transaction have a bogus script_pubkey
9390                 output.script_pubkey = ScriptBuf::new_v0_p2wsh(&wit_program_script.wscript_hash());
9391         }
9392
9393         nodes[0].node.funding_transaction_generated_unchecked(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone(), 0).unwrap();
9394         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
9395         check_added_monitors!(nodes[1], 1);
9396         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9397
9398         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
9399         check_added_monitors!(nodes[0], 1);
9400         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9401
9402         let events_1 = nodes[0].node.get_and_clear_pending_events();
9403         assert_eq!(events_1.len(), 0);
9404
9405         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
9406         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
9407         nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
9408
9409         let expected_err = "funding tx had wrong script/value or output index";
9410         confirm_transaction_at(&nodes[1], &tx, 1);
9411         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() },
9412                 [nodes[0].node.get_our_node_id()], 100000);
9413         check_added_monitors!(nodes[1], 1);
9414         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
9415         assert_eq!(events_2.len(), 1);
9416         if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] {
9417                 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
9418                 if let msgs::ErrorAction::DisconnectPeer { msg } = action {
9419                         assert_eq!(msg.as_ref().unwrap().data, "Channel closed because of an exception: ".to_owned() + expected_err);
9420                 } else { panic!(); }
9421         } else { panic!(); }
9422         assert_eq!(nodes[1].node.list_channels().len(), 0);
9423
9424         // Now confirm a spend of the (bogus) funding transaction. As long as the witness is 5 elements
9425         // long the ChannelMonitor will try to read 32 bytes from the second-to-last element, panicking
9426         // as it's not 32 bytes long.
9427         let mut spend_tx = Transaction {
9428                 version: 2i32, lock_time: LockTime::ZERO,
9429                 input: tx.output.iter().enumerate().map(|(idx, _)| TxIn {
9430                         previous_output: BitcoinOutPoint {
9431                                 txid: tx.txid(),
9432                                 vout: idx as u32,
9433                         },
9434                         script_sig: ScriptBuf::new(),
9435                         sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
9436                         witness: Witness::from_slice(&channelmonitor::deliberately_bogus_accepted_htlc_witness())
9437                 }).collect(),
9438                 output: vec![TxOut {
9439                         value: 1000,
9440                         script_pubkey: ScriptBuf::new(),
9441                 }]
9442         };
9443         check_spends!(spend_tx, tx);
9444         mine_transaction(&nodes[1], &spend_tx);
9445 }
9446
9447 #[test]
9448 fn test_coinbase_funding_tx() {
9449         // Miners are able to fund channels directly from coinbase transactions, however
9450         // by consensus rules, outputs of a coinbase transaction are encumbered by a 100
9451         // block maturity timelock. To ensure that a (non-0conf) channel like this is enforceable
9452         // on-chain, the minimum depth is updated to 100 blocks for coinbase funding transactions.
9453         //
9454         // Note that 0conf channels with coinbase funding transactions are unaffected and are
9455         // immediately operational after opening.
9456         let chanmon_cfgs = create_chanmon_cfgs(2);
9457         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9458         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9459         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9460
9461         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
9462         let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9463
9464         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
9465         let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9466
9467         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
9468
9469         // Create the coinbase funding transaction.
9470         let (temporary_channel_id, tx, _) = create_coinbase_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
9471
9472         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9473         check_added_monitors!(nodes[0], 0);
9474         let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9475
9476         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
9477         check_added_monitors!(nodes[1], 1);
9478         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9479
9480         let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9481
9482         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
9483         check_added_monitors!(nodes[0], 1);
9484
9485         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9486         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
9487
9488         // Starting at height 0, we "confirm" the coinbase at height 1.
9489         confirm_transaction_at(&nodes[0], &tx, 1);
9490         // We connect 98 more blocks to have 99 confirmations for the coinbase transaction.
9491         connect_blocks(&nodes[0], COINBASE_MATURITY - 2);
9492         // Check that we have no pending message events (we have not queued a `channel_ready` yet).
9493         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
9494         // Now connect one more block which results in 100 confirmations of the coinbase transaction.
9495         connect_blocks(&nodes[0], 1);
9496         // There should now be a `channel_ready` which can be handled.
9497         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(&nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
9498
9499         confirm_transaction_at(&nodes[1], &tx, 1);
9500         connect_blocks(&nodes[1], COINBASE_MATURITY - 2);
9501         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
9502         connect_blocks(&nodes[1], 1);
9503         expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
9504         create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
9505 }
9506
9507 fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock: bool) {
9508         // In the first version of the chain::Confirm interface, after a refactor was made to not
9509         // broadcast CSV-locked transactions until their CSV lock is up, we wouldn't reliably broadcast
9510         // transactions after a `transactions_confirmed` call. Specifically, if the chain, provided via
9511         // `best_block_updated` is at height N, and a transaction output which we wish to spend at
9512         // height N-1 (due to a CSV to height N-1) is provided at height N, we will not broadcast the
9513         // spending transaction until height N+1 (or greater). This was due to the way
9514         // `ChannelMonitor::transactions_confirmed` worked, only checking if we should broadcast a
9515         // spending transaction at the height the input transaction was confirmed at, not whether we
9516         // should broadcast a spending transaction at the current height.
9517         // A second, similar, issue involved failing HTLCs backwards - because we only provided the
9518         // height at which transactions were confirmed to `OnchainTx::update_claims_view`, it wasn't
9519         // aware that the anti-reorg-delay had, in fact, already expired, waiting to fail-backwards
9520         // until we learned about an additional block.
9521         //
9522         // As an additional check, if `test_height_before_timelock` is set, we instead test that we
9523         // aren't broadcasting transactions too early (ie not broadcasting them at all).
9524         let chanmon_cfgs = create_chanmon_cfgs(3);
9525         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9526         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9527         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
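             // Use a connect style which makes a single best_block_updated call at the new tip
             // when connecting several blocks at once, matching the skipped-blocks scenario
             // described above.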
9528         *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
9529
9530         create_announced_chan_between_nodes(&nodes, 0, 1);
9531         let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
9532         let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
9533         nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
9534         nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
9535
9536         nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
9537         check_closed_broadcast!(nodes[1], true);
9538         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
9539         check_added_monitors!(nodes[1], 1);
9540         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
9541         assert_eq!(node_txn.len(), 1);
9542
9543         let conf_height = nodes[1].best_block_info().1;
9544         if !test_height_before_timelock {
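                     // Connect a day's worth of blocks so that, when the commitment transaction is
                     // reported as confirmed at the (now-old) conf_height below, its timelocks have
                     // long since expired.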
9545                 connect_blocks(&nodes[1], 24 * 6);
9546         }
9547         nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
9548                 &nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height);
9549         if test_height_before_timelock {
9550                 // If we confirmed the close transaction, but timelocks have not yet expired, we should not
9551                 // generate any events or broadcast any transactions
9552                 assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
9553                 assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
9554         } else {
9555                 // We should broadcast an HTLC transaction spending our funding transaction first
9556                 let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
9557                 assert_eq!(spending_txn.len(), 2);
9558                 let htlc_tx = if spending_txn[0].txid() == node_txn[0].txid() {
9559                         &spending_txn[1]
9560                 } else {
9561                         &spending_txn[0]
9562                 };
9563                 check_spends!(htlc_tx, node_txn[0]);
9564                 // We should also generate a SpendableOutputs event with the to_self output (as its
9565                 // timelock is up).
9566                 let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
9567                 assert_eq!(descriptor_spend_txn.len(), 1);
9568
9569                 // If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we
9570                 // should immediately fail-backwards the HTLC to the previous hop, without waiting for an
9571                 // additional block built on top of the current chain.
9572                 nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
9573                         &nodes[1].get_block_header(conf_height + 1), &[(0, htlc_tx)], conf_height + 1);
9574                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
9575                 check_added_monitors!(nodes[1], 1);
9576
9577                 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9578                 assert!(updates.update_add_htlcs.is_empty());
9579                 assert!(updates.update_fulfill_htlcs.is_empty());
9580                 assert_eq!(updates.update_fail_htlcs.len(), 1);
9581                 assert!(updates.update_fail_malformed_htlcs.is_empty());
9582                 assert!(updates.update_fee.is_none());
9583                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
9584                 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
9585                 expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_announce.contents.short_channel_id, true);
9586         }
9587 }
9588
9589 #[test]
9590 fn test_tx_confirmed_skipping_blocks_immediate_broadcast() {
9591         do_test_tx_confirmed_skipping_blocks_immediate_broadcast(false);
9592         do_test_tx_confirmed_skipping_blocks_immediate_broadcast(true);
9593 }
9594
9595 fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
9596         let chanmon_cfgs = create_chanmon_cfgs(2);
9597         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9598         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9599         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9600
9601         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9602
9603         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
9604                 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
9605         let route = get_route!(nodes[0], payment_params, 10_000).unwrap();
9606
9607         let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]);
9608
9609         {
9610                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
9611                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
9612                 check_added_monitors!(nodes[0], 1);
9613                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9614                 assert_eq!(events.len(), 1);
9615                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
9616                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9617                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
9618         }
9619         expect_pending_htlcs_forwardable!(nodes[1]);
9620         expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 10_000);
9621
9622         {
9623                 // Note that we use a different PaymentId here to allow us to duplicatively pay
9624                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
9625                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_secret.0)).unwrap();
9626                 check_added_monitors!(nodes[0], 1);
9627                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9628                 assert_eq!(events.len(), 1);
9629                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
9630                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9631                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
9632                 // At this point, nodes[1] would notice it has too much value for the payment. It will
9633                 // assume the second is a privacy attack (no longer particularly relevant
9634                 // post-payment_secrets) and fail back the new HTLC. Previously, it'd also have failed back
9635                 // the first HTLC delivered above.
9636         }
9637
9638         expect_pending_htlcs_forwardable_ignore!(nodes[1]);
9639         nodes[1].node.process_pending_htlc_forwards();
9640
9641         if test_for_second_fail_panic {
9642                 // Now we go fail back the first HTLC from the user end.
9643                 nodes[1].node.fail_htlc_backwards(&our_payment_hash);
9644
9645                 let expected_destinations = vec![
9646                         HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
9647                         HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
9648                 ];
9649                 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations);
9650                 nodes[1].node.process_pending_htlc_forwards();
9651
9652                 check_added_monitors!(nodes[1], 1);
9653                 let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9654                 assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2);
9655
9656                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9657                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]);
9658                 commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
9659
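                     // Each of the two payments (sent under distinct PaymentIds) produces a
                     // PaymentPathFailed followed by a PaymentFailed event.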
9660                 let failure_events = nodes[0].node.get_and_clear_pending_events();
9661                 assert_eq!(failure_events.len(), 4);
9662                 if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); }
9663                 if let Event::PaymentFailed { .. } = failure_events[1] {} else { panic!(); }
9664                 if let Event::PaymentPathFailed { .. } = failure_events[2] {} else { panic!(); }
9665                 if let Event::PaymentFailed { .. } = failure_events[3] {} else { panic!(); }
9666         } else {
9667                 // Let the second HTLC fail and claim the first
9668                 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
9669                 nodes[1].node.process_pending_htlc_forwards();
9670
9671                 check_added_monitors!(nodes[1], 1);
9672                 let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9673                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9674                 commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
9675
9676                 expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new());
9677
9678                 claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
9679         }
9680 }
9681
9682 #[test]
9683 fn test_dup_htlc_second_fail_panic() {
9684         // Previously, if we received two HTLCs back-to-back, where the second overran the expected
9685         // value for the payment, we'd fail back both HTLCs after generating a `PaymentClaimable` event.
9686         // Then, if the user failed the second payment, they'd hit a "tried to fail an already failed
9687         // HTLC" debug panic. This test checks for that behavior, ensuring that only one HTLC is auto-failed.
9688         do_test_dup_htlc_second_rejected(true);
9689 }
9690
9691 #[test]
9692 fn test_dup_htlc_second_rejected() {
9693         // Test that if we receive a second HTLC for an MPP payment that overruns the payment amount we
9694         // simply reject the second HTLC but are still able to claim the first HTLC.
9695         do_test_dup_htlc_second_rejected(false);
9696 }
9697
9698 #[test]
9699 fn test_inconsistent_mpp_params() {
9700         // Test that if we receive two HTLCs with inconsistent payment parameters, we fail back the
9701         // second (inconsistent) HTLC while allowing the first to stay pending.
9702         let chanmon_cfgs = create_chanmon_cfgs(4);
9703         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9704         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9705         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9706
9707         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
9708         create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
9709         create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
9710         let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);
9711
9712         let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
9713                 .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap();
9714         let mut route = get_route!(nodes[0], payment_params, 15_000_000).unwrap();
9715         assert_eq!(route.paths.len(), 2);
9716         route.paths.sort_by(|path_a, _| {
9717                 // Sort the paths so that the path through nodes[1] comes first
9718                 if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
9719                         core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
9720         });
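        // With this deterministic ordering, paths[0] is the path through nodes[1] and paths[1] is
        // the path through nodes[2], which the sends and assertions below rely on.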
9721
9722         let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]);
9723
9724         let cur_height = nodes[0].best_block_info().1;
9725         let payment_id = PaymentId([42; 32]);
9726
9727         let session_privs = {
9728                 // We create a fake route here so that we start with three pending HTLCs, which we'll
9729                 // ultimately have, just not right away.
9730                 let mut dup_route = route.clone();
9731                 dup_route.paths.push(route.paths[1].clone());
9732                 nodes[0].node.test_add_new_pending_payment(our_payment_hash,
9733                         RecipientOnionFields::secret_only(our_payment_secret), payment_id, &dup_route).unwrap()
9734         };
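        // `session_privs` thus holds three entries: one per real path, plus one for the duplicated
        // second path, whose key (session_privs[2]) is used for the retry at the end of the test.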
9735         nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash,
9736                 RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id,
9737                 &None, session_privs[0]).unwrap();
9738         check_added_monitors!(nodes[0], 1);
9739
9740         {
9741                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9742                 assert_eq!(events.len(), 1);
9743                 pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), false, None);
9744         }
9745         assert!(nodes[3].node.get_and_clear_pending_events().is_empty());
9746
9747         nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash,
9748                 RecipientOnionFields::secret_only(our_payment_secret), 14_000_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
9749         check_added_monitors!(nodes[0], 1);
9750
9751         {
9752                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9753                 assert_eq!(events.len(), 1);
9754                 let payment_event = SendEvent::from_event(events.pop().unwrap());
9755
9756                 nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9757                 commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false);
9758
9759                 expect_pending_htlcs_forwardable!(nodes[2]);
9760                 check_added_monitors!(nodes[2], 1);
9761
9762                 let mut events = nodes[2].node.get_and_clear_pending_msg_events();
9763                 assert_eq!(events.len(), 1);
9764                 let payment_event = SendEvent::from_event(events.pop().unwrap());
9765
9766                 nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
9767                 check_added_monitors!(nodes[3], 0);
9768                 commitment_signed_dance!(nodes[3], nodes[2], payment_event.commitment_msg, true, true);
9769
9770                 // At this point, nodes[3] should notice the two HTLCs don't contain the same total payment
9771                 // amount. It will assume the second is a privacy attack (no longer particularly relevant
9772                 // post-payment_secrets) and fail back the new HTLC.
9773         }
9774         expect_pending_htlcs_forwardable_ignore!(nodes[3]);
9775         nodes[3].node.process_pending_htlc_forwards();
9776         expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
9777         nodes[3].node.process_pending_htlc_forwards();
9778
9779         check_added_monitors!(nodes[3], 1);
9780
9781         let fail_updates_1 = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
9782         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9783         commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
9784
9785         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]);
9786         check_added_monitors!(nodes[2], 1);
9787
9788         let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
9789         nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_updates_2.update_fail_htlcs[0]);
9790         commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false);
9791
9792         expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
9793
9794         nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash,
9795                 RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id,
9796                 &None, session_privs[2]).unwrap();
9797         check_added_monitors!(nodes[0], 1);
9798
9799         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9800         assert_eq!(events.len(), 1);
9801         pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);
9802
9803         do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage);
9804         expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true, true);
9805 }
9806
9807 #[test]
9808 fn test_double_partial_claim() {
9809         // Test what happens if a node receives a payment, generates a PaymentClaimable event, the HTLCs
9810         // time out, the sender resends only some of the MPP parts, then the user processes the
9811         // PaymentClaimable event, ensuring they don't inadvertently claim only part of the full payment
9812         // amount.
9813         let chanmon_cfgs = create_chanmon_cfgs(4);
9814         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9815         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9816         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9817
9818         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
9819         create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
9820         create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
9821         create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);
9822
9823         let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000);
9824         assert_eq!(route.paths.len(), 2);
9825         route.paths.sort_by(|path_a, _| {
9826                 // Sort the paths so that the path through nodes[1] comes first
9827                 if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
9828                         core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
9829         });
9830
9831         send_along_route_with_secret(&nodes[0], route.clone(), &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 15_000_000, payment_hash, payment_secret);
9832         // nodes[3] has now received a PaymentClaimable event...which it will take some (exorbitant)
9833         // amount of time to respond to.
9834
9835         // Connect some blocks to time out the payment
9836         connect_blocks(&nodes[3], TEST_FINAL_CLTV);
9837         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later
9838
9839         let failed_destinations = vec![
9840                 HTLCDestination::FailedPayment { payment_hash },
9841                 HTLCDestination::FailedPayment { payment_hash },
9842         ];
9843         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations);
9844
9845         pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash, PaymentFailureReason::RecipientRejected);
9846
9847         // nodes[0] now retries one of the two paths...
9848         nodes[0].node.send_payment_with_route(&route, payment_hash,
9849                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9850         check_added_monitors!(nodes[0], 2);
9851
9852         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9853         assert_eq!(events.len(), 2);
9854         let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
9855         pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, false, None);
9856
9857         // At this point nodes[3] has received one half of the payment, and the user goes to handle
9858         // that PaymentClaimable event they got hours ago and never handled...we should refuse to claim.
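        // Claiming now would mean revealing the preimage while only part of the payment amount has
        // arrived, so the claim below should be a no-op: no monitor updates and no messages, as
        // asserted.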
9859         nodes[3].node.claim_funds(payment_preimage);
9860         check_added_monitors!(nodes[3], 0);
9861         assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
9862 }
9863
9864 /// The possible events which may trigger a `max_dust_htlc_exposure` breach
9865 #[derive(Clone, Copy, PartialEq)]
9866 enum ExposureEvent {
9867         /// Breach occurs at HTLC forwarding (see `send_htlc`)
9868         AtHTLCForward,
9869         /// Breach occurs at HTLC reception (see `update_add_htlc`)
9870         AtHTLCReception,
9871         /// Breach occurs at outbound update_fee (see `send_update_fee`)
9872         AtUpdateFeeOutbound,
9873 }
9874
9875 fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool, multiplier_dust_limit: bool, apply_excess_fee: bool) {
9876         // Test that we properly reject dust HTLCs violating our `max_dust_htlc_exposure_msat`
9877         // policy.
9878         //
9879         // At HTLC forward (`send_payment()`), if the sum of the trimmed-to-dust inbound HTLC
9880         // balance, the trimmed-to-dust outbound HTLC balance, and this new payment, as included on
9881         // the next counterparty commitment, is above our `max_dust_htlc_exposure_msat`, we'll
9882         // reject the update. At HTLC reception (`update_add_htlc()`), if the same sum, now
9883         // including the newly-received HTLC as included on the next counterparty commitment, is
9884         // above our `max_dust_htlc_exposure_msat`, we'll fail the update. Note, we return a
9885         // `temporary_channel_failure` (0x1000 | 7), as the channel might be available again for
9886         // HTLC processing once the dust bandwidth has cleared up.
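        //
        // As a rough illustration (these particular values are not asserted by this test): with a
        // `max_dust_htlc_exposure_msat` of 5_000_000 and 4_000_000 msat of dust HTLCs already
        // pending, a new 1_250_000 msat dust HTLC would push exposure to 5_250_000 msat and be
        // rejected.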
9887
9888         let chanmon_cfgs = create_chanmon_cfgs(2);
9889         let mut config = test_default_channel_config();
9890
9891         // We hard-code the feerate values here, but they're re-calculated further down and asserted.
9892         // If the values below ever change, these constants should simply be updated.
9893         const AT_FEE_OUTBOUND_HTLCS: u64 = 20;
9894         let nondust_htlc_count_in_limit =
9895                 if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
9896                         AT_FEE_OUTBOUND_HTLCS
9897                 } else { 0 };
9898         let initial_feerate = if apply_excess_fee { 253 * 2 } else { 253 };
9899         let expected_dust_buffer_feerate = initial_feerate + 2530;
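        // The dust buffer feerate is expected to pad the channel's feerate by 2530 sat/KW
        // (asserted against `get_dust_buffer_feerate` below), so that HTLCs which would become
        // dust after a moderate feerate increase already count towards the exposure limit.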
9900         let mut commitment_tx_cost = commit_tx_fee_msat(initial_feerate - 253, nondust_htlc_count_in_limit, &ChannelTypeFeatures::empty());
9901         commitment_tx_cost +=
9902                 if on_holder_tx {
9903                         htlc_success_tx_weight(&ChannelTypeFeatures::empty())
9904                 } else {
9905                         htlc_timeout_tx_weight(&ChannelTypeFeatures::empty())
9906                 } * (initial_feerate as u64 - 253) / 1000 * nondust_htlc_count_in_limit;
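        // `commitment_tx_cost` estimates, in msat, the commitment and second-stage transaction
        // fees attributable to the feerate in excess of the 253 sat/KW floor for the non-dust
        // HTLCs; such fees count towards dust exposure, so they're folded into the limit below.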
9907         {
9908                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9909                 *feerate_lock = initial_feerate;
9910         }
9911         config.channel_config.max_dust_htlc_exposure = if multiplier_dust_limit {
9912                 // Default test fee estimator rate is 253 sat/kw, so we set the multiplier to 5_000_000 / 253
9913                 // to get roughly the same initial value as the default setting when this test was
9914                 // originally written.
9915                 MaxDustHTLCExposure::FeeRateMultiplier((5_000_000 + commitment_tx_cost) / 253)
9916         } else { MaxDustHTLCExposure::FixedLimitMsat(5_000_000 + commitment_tx_cost) };
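        // With the test fee estimator pinned at 253 sat/KW, the multiplier branch works out to
        // roughly the same effective limit as the fixed branch, so the assertions below should
        // hold for either threshold type.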
9917         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9918         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
9919         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9920
9921         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
9922         let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9923         open_channel.common_fields.max_htlc_value_in_flight_msat = 50_000_000;
9924         open_channel.common_fields.max_accepted_htlcs = 60;
9925         if on_holder_tx {
9926                 open_channel.common_fields.dust_limit_satoshis = 546;
9927         }
9928         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
9929         let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9930         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
9931
9932         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9933
9934         let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
9935
9936         if on_holder_tx {
9937                 let mut node_0_per_peer_lock;
9938                 let mut node_0_peer_state_lock;
9939                 match get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id) {
9940                         ChannelPhase::UnfundedOutboundV1(chan) => {
9941                                 chan.context.holder_dust_limit_satoshis = 546;
9942                         },
9943                         _ => panic!("Unexpected ChannelPhase variant"),
9944                 }
9945         }
9946
9947         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9948         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
9949         check_added_monitors!(nodes[1], 1);
9950         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9951
9952         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
9953         check_added_monitors!(nodes[0], 1);
9954         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9955
9956         let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
9957         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
9958         update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
9959
9960         {
9961                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9962                 *feerate_lock = 253;
9963         }
9964
9965         // Fetch a route in advance, as route-fetching will fail once we're unable to send.
9966         let (mut route, payment_hash, _, payment_secret) =
9967                 get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
9968
9969         let (dust_buffer_feerate, max_dust_htlc_exposure_msat) = {
9970                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
9971                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
9972                 let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
9973                 (chan.context().get_dust_buffer_feerate(None) as u64,
9974                 chan.context().get_max_dust_htlc_exposure_msat(253))
9975         };
9976         assert_eq!(dust_buffer_feerate, expected_dust_buffer_feerate as u64);
9977         let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - 1) * 1000;
9978         let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
9979
9980         // Subtract 3 sats for the multiplier and 2 sats for the fixed limit to make sure we are 50% below the dust limit.
9981         // This is to make sure we fully use the dust limit. If we don't, we could end up with `dust_inbound_htlc_on_holder_tx` being 1
9982         // while `max_dust_htlc_exposure_msat` is not equal to `dust_outbound_htlc_on_holder_tx_msat`.
9983         let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000;
9984         let dust_inbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
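        // These counts are how many dust HTLCs of the given size fit under the exposure limit; the
        // loops below use them to fill the channel up to just under the limit before triggering a
        // breach.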
9985
9986         // This test was written with a fixed dust value here, which we retain, but assert that it is,
9987         // indeed, dust on both transactions.
9988         let dust_htlc_on_counterparty_tx: u64 = 4;
9989         let dust_htlc_on_counterparty_tx_msat: u64 = 1_250_000;
9990         let calcd_dust_htlc_on_counterparty_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000;
9991         assert!(dust_htlc_on_counterparty_tx_msat < dust_inbound_htlc_on_holder_tx_msat);
9992         assert!(dust_htlc_on_counterparty_tx_msat < calcd_dust_htlc_on_counterparty_tx_msat);
9993
9994         if on_holder_tx {
9995                 if dust_outbound_balance {
9996                         // Outbound dust threshold: 2223 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
9997                         // Outbound dust balance: 4372 sats
9998                         // Note, we need sent payment to be above outbound dust threshold on counterparty_tx of 2132 sats
9999                         for _ in 0..dust_outbound_htlc_on_holder_tx {
10000                                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_outbound_htlc_on_holder_tx_msat);
10001                                 nodes[0].node.send_payment_with_route(&route, payment_hash,
10002                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10003                         }
10004                 } else {
10005                         // Inbound dust threshold: 2324 sats (`dust_buffer_feerate` * HTLC_SUCCESS_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
10006                         // Inbound dust balance: 4372 sats
10007                         // Note, we need sent payment to be above outbound dust threshold on counterparty_tx of 2031 sats
10008                         for _ in 0..dust_inbound_htlc_on_holder_tx {
10009                                 route_payment(&nodes[1], &[&nodes[0]], dust_inbound_htlc_on_holder_tx_msat);
10010                         }
10011                 }
10012         } else {
10013                 if dust_outbound_balance {
10014                 // Outbound dust threshold: 2132 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
10015                         // Outbound dust balance: 5000 sats
10016                         for _ in 0..dust_htlc_on_counterparty_tx - 1 {
10017                                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_htlc_on_counterparty_tx_msat);
10018                                 nodes[0].node.send_payment_with_route(&route, payment_hash,
10019                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10020                         }
10021                 } else {
10022                 // Inbound dust threshold: 2031 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
10023                         // Inbound dust balance: 5000 sats
10024                         for _ in 0..dust_htlc_on_counterparty_tx - 1 {
10025                                 route_payment(&nodes[1], &[&nodes[0]], dust_htlc_on_counterparty_tx_msat);
10026                         }
10027                 }
10028         }
10029
10030         if exposure_breach_event == ExposureEvent::AtHTLCForward {
10031                 route.paths[0].hops.last_mut().unwrap().fee_msat =
10032                         if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 };
10033                 // With default dust exposure: 5000 sats
10034                 // Both the holder- and counterparty-tx cases fail identically here.
10035                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
10036                                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
10037                         ), true, APIError::ChannelUnavailable { .. }, {});
10043         } else if exposure_breach_event == ExposureEvent::AtHTLCReception {
10044                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 4 });
10045                 nodes[1].node.send_payment_with_route(&route, payment_hash,
10046                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10047                 check_added_monitors!(nodes[1], 1);
10048                 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
10049                 assert_eq!(events.len(), 1);
10050                 let payment_event = SendEvent::from_event(events.remove(0));
10051                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
10052                 // With default dust exposure: 5000 sats
10053                 if on_holder_tx {
10054                         // Outbound dust balance: 6399 sats
10055                         let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
10056                         let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
10057                         nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
10058                 } else {
10059                         // Outbound dust balance: 5200 sats
10060                         nodes[0].logger.assert_log("lightning::ln::channel",
10061                                 format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
10062                                         dust_htlc_on_counterparty_tx_msat * dust_htlc_on_counterparty_tx + commitment_tx_cost + 4,
10063                                         max_dust_htlc_exposure_msat), 1);
10064                 }
10065         } else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
10066                 route.paths[0].hops.last_mut().unwrap().fee_msat = 2_500_000;
10067                 // For the multiplier dust exposure limit, since it scales with feerate,
10068                 // we need to add a lot of HTLCs that will become dust at the new feerate
10069                 // to cross the threshold.
10070                 for _ in 0..AT_FEE_OUTBOUND_HTLCS {
10071                         let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(1_000), None);
10072                         nodes[0].node.send_payment_with_route(&route, payment_hash,
10073                                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10074                 }
10075                 {
10076                         let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10077                         *feerate_lock = *feerate_lock * 10;
10078                 }
10079                 nodes[0].node.timer_tick_occurred();
10080                 check_added_monitors!(nodes[0], 1);
10081                 nodes[0].logger.assert_log_contains("lightning::ln::channel", "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure", 1);
10082         }
10083
10084         let _ = nodes[0].node.get_and_clear_pending_msg_events();
10085         let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
10086         added_monitors.clear();
10087 }
10088
10089 fn do_test_max_dust_htlc_exposure_by_threshold_type(multiplier_dust_limit: bool, apply_excess_fee: bool) {
10090         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit, apply_excess_fee);
10091         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit, apply_excess_fee);
10092         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit, apply_excess_fee);
10093         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit, apply_excess_fee);
10094         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit, apply_excess_fee);
10095         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit, apply_excess_fee);
10096         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit, apply_excess_fee);
10097         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit, apply_excess_fee);
10098         if !multiplier_dust_limit && !apply_excess_fee {
10099                 // Because non-dust HTLC transaction fees are included in the dust exposure, trying to
10100                 // increase the fee to hit a higher dust exposure with a
10101                 // `MaxDustHTLCExposure::FeeRateMultiplier` is no longer particularly practical, so we only
10102                 // run these cases without `multiplier_dust_limit` or `apply_excess_fee` set.
10103                 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit, apply_excess_fee);
10104                 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit, apply_excess_fee);
10105                 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit, apply_excess_fee);
10106                 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit, apply_excess_fee);
10107         }
10108 }
10109
10110 #[test]
10111 fn test_max_dust_htlc_exposure() {
10112         do_test_max_dust_htlc_exposure_by_threshold_type(false, false);
10113         do_test_max_dust_htlc_exposure_by_threshold_type(false, true);
10114         do_test_max_dust_htlc_exposure_by_threshold_type(true, false);
10115         do_test_max_dust_htlc_exposure_by_threshold_type(true, true);
10116 }
10117
10118 #[test]
10119 fn test_nondust_htlc_fees_are_dust() {
10120         // Test that the transaction fees paid in nondust HTLCs count towards our dust limit
10121         let chanmon_cfgs = create_chanmon_cfgs(3);
10122         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10123
10124         let mut config = test_default_channel_config();
10125         // Set the dust exposure limit to the default value
10126         config.channel_config.max_dust_htlc_exposure =
10127                 MaxDustHTLCExposure::FeeRateMultiplier(10_000);
10128         // Make sure the HTLC limits don't get in the way
10129         config.channel_handshake_limits.min_max_accepted_htlcs = 400;
10130         config.channel_handshake_config.our_max_accepted_htlcs = 400;
10131         config.channel_handshake_config.our_htlc_minimum_msat = 1;
10132
10133         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config), Some(config), Some(config)]);
10134         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10135
10136         // Create a channel from 1 -> 0 but immediately push all of the funds towards 0
10137         let chan_id_1 = create_announced_chan_between_nodes(&nodes, 1, 0).2;
10138         while nodes[1].node.list_channels()[0].next_outbound_htlc_limit_msat > 0 {
10139                 send_payment(&nodes[1], &[&nodes[0]], nodes[1].node.list_channels()[0].next_outbound_htlc_limit_msat);
10140         }
10141
10142         // First get the channel one HTLC_VALUE HTLC away from the dust limit by sending dust HTLCs
10143         // repeatedly until we run out of space.
10144         const HTLC_VALUE: u64 = 1_000_000; // Doesn't matter, tune until the test passes
10145         let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], HTLC_VALUE).0;
10146
10147         while nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat == 0 {
10148                 route_payment(&nodes[0], &[&nodes[1]], HTLC_VALUE);
10149         }
10150         assert_ne!(nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat, 0,
10151                 "We don't want to run out of ability to send because of some non-dust limit");
10152         assert!(nodes[0].node.list_channels()[0].pending_outbound_htlcs.len() < 10,
10153                 "We should be able to fill our dust limit without too many HTLCs");
10154
10155         let dust_limit = nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat;
10156         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
10157         assert_ne!(nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat, 0,
10158                 "Make sure we are able to send once we clear one HTLC");
10159
10160         // At this point we have somewhere between dust_limit and dust_limit * 2 left in our dust
10161         // exposure limit, and we want to max that out using non-dust HTLCs.
10162         let commitment_tx_per_htlc_cost =
10163                 htlc_success_tx_weight(&ChannelTypeFeatures::empty()) * 253;
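        // Since feerates are in sat/kWU, weight * feerate yields a value in msat directly
        // (weight * rate / 1000 sats), so this is roughly the second-stage fee each additional
        // non-dust HTLC adds to our dust exposure at the 253 sat/kWU test feerate.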
10164         let max_htlcs_remaining = dust_limit * 2 / commitment_tx_per_htlc_cost;
10165         assert!(max_htlcs_remaining < 30,
10166                 "We should be able to fill our dust limit without too many HTLCs");
10167         for i in 0..max_htlcs_remaining + 1 {
10168                 assert_ne!(i, max_htlcs_remaining);
10169                 if nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat < dust_limit {
10170                         // We found our limit, and it was less than max_htlcs_remaining!
10171                         // At this point we can only send dust HTLCs as any non-dust HTLCs will overuse our
10172                         // remaining dust exposure.
10173                         break;
10174                 }
10175                 route_payment(&nodes[0], &[&nodes[1]], dust_limit * 2);
10176         }
10177
10178         // At this point non-dust HTLCs are no longer accepted from node 0 -> 1; we also check that
10179         // such HTLCs can't be routed over the same channel either.
10180         create_announced_chan_between_nodes(&nodes, 2, 0);
10181         let (route, payment_hash, _, payment_secret) =
10182                 get_route_and_payment_hash!(nodes[2], nodes[1], dust_limit * 2);
10183         let onion = RecipientOnionFields::secret_only(payment_secret);
10184         nodes[2].node.send_payment_with_route(&route, payment_hash, onion, PaymentId([0; 32])).unwrap();
10185         check_added_monitors(&nodes[2], 1);
10186         let send = SendEvent::from_node(&nodes[2]);
10187
10188         nodes[0].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send.msgs[0]);
10189         commitment_signed_dance!(nodes[0], nodes[2], send.commitment_msg, false, true);
10190
10191         expect_pending_htlcs_forwardable!(nodes[0]);
10192         check_added_monitors(&nodes[0], 1);
10193         let node_id_1 = nodes[1].node.get_our_node_id();
10194         expect_htlc_handling_failed_destinations!(
10195                 nodes[0].node.get_and_clear_pending_events(),
10196                 &[HTLCDestination::NextHopChannel { node_id: Some(node_id_1), channel_id: chan_id_1 }]
10197         );
10198
10199         let fail = get_htlc_update_msgs(&nodes[0], &nodes[2].node.get_our_node_id());
10200         nodes[2].node.handle_update_fail_htlc(&nodes[0].node.get_our_node_id(), &fail.update_fail_htlcs[0]);
10201         commitment_signed_dance!(nodes[2], nodes[0], fail.commitment_signed, false);
10202         expect_payment_failed_conditions(&nodes[2], payment_hash, false, PaymentFailedConditions::new());
10203 }
10204
10206 #[test]
10207 fn test_non_final_funding_tx() {
10208         let chanmon_cfgs = create_chanmon_cfgs(2);
10209         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10210         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10211         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10212
10213         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10214         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10215         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10216         let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10217         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10218
10219         let best_height = nodes[0].node.best_block.read().unwrap().height;
10220
10221         let chan_id = *nodes[0].network_chan_count.borrow();
10222         let events = nodes[0].node.get_and_clear_pending_events();
10223         let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::ScriptBuf::new(), sequence: Sequence(1), witness: Witness::from_slice(&[&[1]]) };
10224         assert_eq!(events.len(), 1);
10225         let tx = match events[0] {
10226                 Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
10227                         // Timelock the transaction _beyond_ the best client height + 1.
10228                         Transaction { version: chan_id as i32, lock_time: LockTime::from_height(best_height + 2).unwrap(), input: vec![input], output: vec![TxOut {
10229                                 value: *channel_value_satoshis, script_pubkey: output_script.clone(),
10230                         }]}
10231                 },
10232                 _ => panic!("Unexpected event"),
10233         };
10234         // Transaction should fail as it's evaluated as non-final for propagation.
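        // (A funding transaction must be broadcastable right away; with an absolute locktime
        // beyond best_height + 1 it is not, hence the error checked below.)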
10235         match nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()) {
10236                 Err(APIError::APIMisuseError { err }) => {
10237                         assert_eq!(format!("Funding transaction absolute timelock is non-final"), err);
10238                 },
10239                 _ => panic!()
10240         }
10241         let err = "Error in transaction funding: Misuse error: Funding transaction absolute timelock is non-final".to_owned();
10242         check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(temp_channel_id, false, ClosureReason::ProcessingError { err })]);
10243         assert_eq!(get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id()).data, "Failed to fund channel");
10244 }
10245
10246 #[test]
10247 fn test_non_final_funding_tx_within_headroom() {
10248         let chanmon_cfgs = create_chanmon_cfgs(2);
10249         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10250         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10251         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10252
10253         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10254         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10255         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10256         let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10257         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10258
10259         let best_height = nodes[0].node.best_block.read().unwrap().height;
10260
10261         let chan_id = *nodes[0].network_chan_count.borrow();
10262         let events = nodes[0].node.get_and_clear_pending_events();
10263         let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::ScriptBuf::new(), sequence: Sequence(1), witness: Witness::from_slice(&[[1]]) };
10264         assert_eq!(events.len(), 1);
10265         let tx = match events[0] {
10266                 Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
10267                         // Timelock the transaction within a +1 headroom from the best block.
10268                         Transaction { version: chan_id as i32, lock_time: LockTime::from_consensus(best_height + 1), input: vec![input], output: vec![TxOut {
10269                                 value: *channel_value_satoshis, script_pubkey: output_script.clone(),
10270                         }]}
10271                 },
10272                 _ => panic!("Unexpected event"),
10273         };
10274
10275         // Transaction should be accepted if it's in a +1 headroom from best block.
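        // (`best_height + 1` sits within the one-block locktime headroom allowed for funding
        // transactions, in contrast to `test_non_final_funding_tx` above.)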
10276         assert!(nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
10277         get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
10278 }
10279
10280 #[test]
10281 fn accept_busted_but_better_fee() {
10282         // If a peer sends us a fee update that is too low, but higher than our previous channel
10283         // feerate, we should accept it. In the future we may want to consider closing the channel
10284         // later, but for now we only accept the update.
10285         let mut chanmon_cfgs = create_chanmon_cfgs(2);
10286         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10287         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10288         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10289
10290         create_chan_between_nodes(&nodes[0], &nodes[1]);
10291
10292         // Set nodes[1] to expect 5,000 sat/kW.
10293         {
10294                 let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
10295                 *feerate_lock = 5000;
10296         }
10297
10298         // If nodes[0] increases their feerate, even if it's not enough, nodes[1] should accept it.
10299         {
10300                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10301                 *feerate_lock = 1000;
10302         }
10303         nodes[0].node.timer_tick_occurred();
10304         check_added_monitors!(nodes[0], 1);
10305
10306         let events = nodes[0].node.get_and_clear_pending_msg_events();
10307         assert_eq!(events.len(), 1);
10308         match events[0] {
10309                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
10310                         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
10311                         commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
10312                 },
10313                 _ => panic!("Unexpected event"),
10314         };
10315
10316         // If nodes[0] increases their feerate further, even if it's not enough, nodes[1] should
10317         // accept it.
10318         {
10319                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10320                 *feerate_lock = 2000;
10321         }
10322         nodes[0].node.timer_tick_occurred();
10323         check_added_monitors!(nodes[0], 1);
10324
10325         let events = nodes[0].node.get_and_clear_pending_msg_events();
10326         assert_eq!(events.len(), 1);
10327         match events[0] {
10328                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
10329                         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
10330                         commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
10331                 },
10332                 _ => panic!("Unexpected event"),
10333         };
10334
10335         // However, if nodes[0] decreases their feerate, nodes[1] should reject it and close the
10336         // channel.
10337         {
10338                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10339                 *feerate_lock = 1000;
10340         }
10341         nodes[0].node.timer_tick_occurred();
10342         check_added_monitors!(nodes[0], 1);
10343
10344         let events = nodes[0].node.get_and_clear_pending_msg_events();
10345         assert_eq!(events.len(), 1);
10346         match events[0] {
10347                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
10348                         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
10349                         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError {
10350                                 err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000".to_owned() },
10351                                 [nodes[0].node.get_our_node_id()], 100000);
10352                         check_closed_broadcast!(nodes[1], true);
10353                         check_added_monitors!(nodes[1], 1);
10354                 },
10355                 _ => panic!("Unexpected event"),
10356         };
10357 }
10358
10359 fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash: bool) {
10360         let mut chanmon_cfgs = create_chanmon_cfgs(2);
10361         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10362         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10363         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10364         let min_final_cltv_expiry_delta = 120;
10365         let final_cltv_expiry_delta = if valid_delta { min_final_cltv_expiry_delta + 2 } else {
10366                 min_final_cltv_expiry_delta - 2 };
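        // Nudging the route's final CLTV expiry delta two above (valid) or two below (invalid) the
        // invoice's `min_final_cltv_expiry_delta` exercises both the claimable and the fail-back
        // paths below.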
10367         let recv_value = 100_000;
10368
10369         create_chan_between_nodes(&nodes[0], &nodes[1]);
10370
10371         let payment_parameters = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), final_cltv_expiry_delta as u32);
10372         let (payment_hash, payment_preimage, payment_secret) = if use_user_hash {
10373                 let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1],
10374                         Some(recv_value), Some(min_final_cltv_expiry_delta));
10375                 (payment_hash, payment_preimage, payment_secret)
10376         } else {
10377                 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(recv_value), 7200, Some(min_final_cltv_expiry_delta)).unwrap();
10378                 (payment_hash, nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap(), payment_secret)
10379         };
10380         let route = get_route!(nodes[0], payment_parameters, recv_value).unwrap();
10381         nodes[0].node.send_payment_with_route(&route, payment_hash,
10382                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10383         check_added_monitors!(nodes[0], 1);
10384         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
10385         assert_eq!(events.len(), 1);
10386         let mut payment_event = SendEvent::from_event(events.pop().unwrap());
10387         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
10388         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
10389         expect_pending_htlcs_forwardable!(nodes[1]);
10390
10391         if valid_delta {
10392                 expect_payment_claimable!(nodes[1], payment_hash, payment_secret, recv_value, if use_user_hash {
10393                         None } else { Some(payment_preimage) }, nodes[1].node.get_our_node_id());
10394
10395                 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
10396         } else {
10397                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
10398
10399                 check_added_monitors!(nodes[1], 1);
10400
10401                 let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
10402                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates.update_fail_htlcs[0]);
10403                 commitment_signed_dance!(nodes[0], nodes[1], fail_updates.commitment_signed, false, true);
10404
10405                 expect_payment_failed!(nodes[0], payment_hash, true);
10406         }
10407 }
10408
10409 #[test]
10410 fn test_payment_with_custom_min_cltv_expiry_delta() {
10411         do_payment_with_custom_min_final_cltv_expiry(false, false);
10412         do_payment_with_custom_min_final_cltv_expiry(false, true);
10413         do_payment_with_custom_min_final_cltv_expiry(true, false);
10414         do_payment_with_custom_min_final_cltv_expiry(true, true);
10415 }
10416
10417 #[test]
10418 fn test_disconnects_peer_awaiting_response_ticks() {
10419         // Tests that nodes which are awaiting a response critical for channel responsiveness
10420         // disconnect their counterparty after `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10421         let mut chanmon_cfgs = create_chanmon_cfgs(2);
10422         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10423         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10424         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10425
10426         // Asserts a disconnect event is queued to the user.
10427         let check_disconnect_event = |node: &Node, should_disconnect: bool| {
10428                 let disconnect_event = node.node.get_and_clear_pending_msg_events().iter().find_map(|event|
10429                         if let MessageSendEvent::HandleError { action, .. } = event {
10430                                 if let msgs::ErrorAction::DisconnectPeerWithWarning { .. } = action {
10431                                         Some(())
10432                                 } else {
10433                                         None
10434                                 }
10435                         } else {
10436                                 None
10437                         }
10438                 );
10439                 assert_eq!(disconnect_event.is_some(), should_disconnect);
10440         };
10441
10442         // Fires timer ticks ensuring we only attempt to disconnect peers after reaching
10443         // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10444         let check_disconnect = |node: &Node| {
10445                 // No disconnect without any timer ticks.
10446                 check_disconnect_event(node, false);
10447
10448                 // No disconnect with 1 timer tick less than required.
10449                 for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS - 1 {
10450                         node.node.timer_tick_occurred();
10451                         check_disconnect_event(node, false);
10452                 }
10453
10454                 // Disconnect after reaching the required ticks.
10455                 node.node.timer_tick_occurred();
10456                 check_disconnect_event(node, true);
10457
10458                 // Disconnect again on the next tick if the peer hasn't been disconnected yet.
10459                 node.node.timer_tick_occurred();
10460                 check_disconnect_event(node, true);
10461         };
10462
10463         create_chan_between_nodes(&nodes[0], &nodes[1]);
10464
10465         // We'll start by performing a fee update with Alice (nodes[0]) on the channel.
10466         *nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2;
10467         nodes[0].node.timer_tick_occurred();
10468         check_added_monitors!(&nodes[0], 1);
10469         let alice_fee_update = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
10470         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), alice_fee_update.update_fee.as_ref().unwrap());
10471         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &alice_fee_update.commitment_signed);
10472         check_added_monitors!(&nodes[1], 1);
10473
10474         // This will prompt Bob (nodes[1]) to respond with his `CommitmentSigned` and `RevokeAndACK`.
10475         let (bob_revoke_and_ack, bob_commitment_signed) = get_revoke_commit_msgs!(&nodes[1], nodes[0].node.get_our_node_id());
10476         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revoke_and_ack);
10477         check_added_monitors!(&nodes[0], 1);
10478         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_commitment_signed);
10479         check_added_monitors(&nodes[0], 1);
10480
10481         // Alice then needs to send her final `RevokeAndACK` to complete the commitment dance. We
10482         // pretend Bob hasn't received the message and check whether he'll disconnect Alice after
10483         // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10484         let alice_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
10485         check_disconnect(&nodes[1]);
10486
10487         // Now, we'll reconnect them to test awaiting a `ChannelReestablish` message.
10488         //
10489         // Note that since the commitment dance didn't complete above, Alice is expected to resend her
10490         // final `RevokeAndACK` to Bob to complete it.
10491         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
10492         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10493         let bob_init = msgs::Init {
10494                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
10495         };
10496         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &bob_init, true).unwrap();
10497         let alice_init = msgs::Init {
10498                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
10499         };
10500         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &alice_init, true).unwrap();
10501
10502         // Upon reconnection, Alice sends her `ChannelReestablish` to Bob. Alice, however, hasn't
10503         // received Bob's yet, so she should disconnect him after reaching
10504         // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10505         let alice_channel_reestablish = get_event_msg!(
10506                 nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()
10507         );
10508         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &alice_channel_reestablish);
10509         check_disconnect(&nodes[0]);
10510
10511         // Bob now sends his `ChannelReestablish` to Alice to resume the channel and consider it "live".
10512         let bob_channel_reestablish = nodes[1].node.get_and_clear_pending_msg_events().iter().find_map(|event|
10513                 if let MessageSendEvent::SendChannelReestablish { node_id, msg } = event {
10514                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
10515                         Some(msg.clone())
10516                 } else {
10517                         None
10518                 }
10519         ).unwrap();
10520         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bob_channel_reestablish);
10521
10522         // Sanity check that Alice won't disconnect Bob since she's no longer waiting for any messages.
10523         for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
10524                 nodes[0].node.timer_tick_occurred();
10525                 check_disconnect_event(&nodes[0], false);
10526         }
10527
10528         // However, Bob is still waiting on Alice's `RevokeAndACK`, so he should disconnect her after
10529         // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10530         check_disconnect(&nodes[1]);
10531
10532         // Finally, have Bob process the last message.
10533         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &alice_revoke_and_ack);
10534         check_added_monitors(&nodes[1], 1);
10535
10536         // At this point, neither node should attempt to disconnect the other, since neither is
10537         // waiting on any messages.
10538         for node in &nodes {
10539                 for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
10540                         node.node.timer_tick_occurred();
10541                         check_disconnect_event(node, false);
10542                 }
10543         }
10544 }
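
// A minimal sketch (ours, not part of the upstream test) of the tick-driven disconnect
// pattern exercised above: a node that is still awaiting a counterparty message should
// stay quiet until `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS` timer ticks have elapsed,
// then request a disconnect via a `HandleError` message event.
#[allow(dead_code)]
fn tick_until_disconnect_sketch(node: &Node<'_, '_, '_>) {
        // No disconnect should be requested while the tick budget is not yet exhausted.
        for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS - 1 {
                node.node.timer_tick_occurred();
                assert!(node.node.get_and_clear_pending_msg_events().is_empty());
        }
        // The final tick should produce a `HandleError` event requesting the disconnect.
        node.node.timer_tick_occurred();
        let events = node.node.get_and_clear_pending_msg_events();
        assert!(matches!(events[0], MessageSendEvent::HandleError { .. }));
}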
10545
10546 #[test]
10547 fn test_remove_expired_outbound_unfunded_channels() {
10548         let chanmon_cfgs = create_chanmon_cfgs(2);
10549         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10550         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10551         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10552
10553         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10554         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10555         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10556         let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10557         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10558
10559         let events = nodes[0].node.get_and_clear_pending_events();
10560         assert_eq!(events.len(), 1);
10561         match events[0] {
10562                 Event::FundingGenerationReady { .. } => (),
10563                 _ => panic!("Unexpected event"),
10564         };
10565
10566                 // Asserts whether the outbound channel has been removed from nodes[0]'s peer state map.
10567         let check_outbound_channel_existence = |should_exist: bool| {
10568                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
10569                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
10570                 assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
10571         };
10572
10573         // Channel should exist without any timer ticks.
10574         check_outbound_channel_existence(true);
10575
10576         // The channel should still exist after one timer tick fewer than required.
10577         for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
10578                 nodes[0].node.timer_tick_occurred();
10579                 check_outbound_channel_existence(true);
10580         }
10581
10582         // The channel should be removed once the required number of ticks has elapsed.
10583         nodes[0].node.timer_tick_occurred();
10584         check_outbound_channel_existence(false);
10585
10586         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
10587         assert_eq!(msg_events.len(), 1);
10588         match msg_events[0] {
10589                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
10590                         assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
10591                 },
10592                 _ => panic!("Unexpected event"),
10593         }
10594         check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
10595 }
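
// As a sketch (ours), the same expiry can be observed through the public API alone,
// assuming the unfunded channel is the node's only channel:
//
//         assert!(nodes[0].node.list_channels().iter().any(|c| c.channel_id == temp_channel_id));
//         for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS { nodes[0].node.timer_tick_occurred(); }
//         assert!(nodes[0].node.list_channels().is_empty());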
10596
10597 #[test]
10598 fn test_remove_expired_inbound_unfunded_channels() {
10599         let chanmon_cfgs = create_chanmon_cfgs(2);
10600         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10601         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10602         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10603
10604         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10605         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10606         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10607         let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10608         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10609
10610         let events = nodes[0].node.get_and_clear_pending_events();
10611         assert_eq!(events.len(), 1);
10612         match events[0] {
10613                 Event::FundingGenerationReady { .. } => (),
10614                 _ => panic!("Unexpected event"),
10615         };
10616
10617         // Asserts whether the inbound channel has been removed from nodes[1]'s peer state map.
10618         let check_inbound_channel_existence = |should_exist: bool| {
10619                 let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
10620                 let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
10621                 assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
10622         };
10623
10624         // Channel should exist without any timer ticks.
10625         check_inbound_channel_existence(true);
10626
10627         // The channel should still exist after one timer tick fewer than required.
10628         for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
10629                 nodes[1].node.timer_tick_occurred();
10630                 check_inbound_channel_existence(true);
10631         }
10632
10633         // The channel should be removed once the required number of ticks has elapsed.
10634         nodes[1].node.timer_tick_occurred();
10635         check_inbound_channel_existence(false);
10636
10637         let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
10638         assert_eq!(msg_events.len(), 1);
10639         match msg_events[0] {
10640                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
10641                         assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
10642                 },
10643                 _ => panic!("Unexpected event"),
10644         }
10645         check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
10646 }
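
// Note (ours): this inbound case mirrors the outbound test above exactly; only the node
// whose timer drives the expiry differs. Both sides enforce the same
// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS` budget of `timer_tick_occurred` calls.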
10647
10648 #[test]
10649 fn test_channel_close_when_not_timely_accepted() {
10650         // Create network of two nodes
10651         let chanmon_cfgs = create_chanmon_cfgs(2);
10652         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10653         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10654         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10655
10656         // Simulate a peer disconnect mid-handshake: the channel is initiated from the node 0
10657         // side, but the nodes disconnect before the open_channel message is even delivered, so
10658         // node 1 never gets a chance to send accept_channel.
10659         let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
10660         let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10661         assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
10662
10663         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
10664         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10665
10666         // Make sure that we have not removed the OutboundV1Channel from node[0] immediately.
10667         assert_eq!(nodes[0].node.list_channels().len(), 1);
10668
10669         // Node 1 never received the open_channel message, so from its perspective no channel exists.
10670         assert_eq!(nodes[1].node.list_channels().len(), 0);
10671
10672         // In the meantime, some time passes.
10673         for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
10674                 nodes[0].node.timer_tick_occurred();
10675         }
10676
10677         // Since we disconnected from the peer and did not reconnect in time,
10678         // we should have force-closed the channel by now.
10679         check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
10680         assert_eq!(nodes[0].node.list_channels().len(), 0);
10681
10682         {
10683                 // Since the accept_channel message was never received, the channel should have
10684                 // been force-closed by now on node 0's side, and the peer entry removed from
10685                 // per_peer_state.
10686                 let node_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
10687                 assert_eq!(node_0_per_peer_state.len(), 0);
10688         }
10689 }
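
// Sketch (ours) of the lifecycle this test walks through, seen from the opener's side:
//
//         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
//         assert_eq!(nodes[0].node.list_channels().len(), 1);  // kept, awaiting a reconnect
//         for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
//                 nodes[0].node.timer_tick_occurred();         // the expiry budget burns down
//         }
//         assert_eq!(nodes[0].node.list_channels().len(), 0);  // force-closed on timeout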
10690
10691 #[test]
10692 fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() {
10693         // Create network of two nodes
10694         let chanmon_cfgs = create_chanmon_cfgs(2);
10695         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10696         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10697         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10698
10699         // Simulate a peer disconnect mid-handshake: the channel is initiated from the node 0
10700         // side, but the nodes disconnect before the open_channel message is even delivered, so
10701         // node 1 never gets a chance to send accept_channel.
10702         let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
10703         let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10704         assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
10705
10706         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
10707         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10708
10709         // Make sure that we have not removed the OutboundV1Channel from node[0] immediately.
10710         assert_eq!(nodes[0].node.list_channels().len(), 1);
10711
10712         // Node 1 never received the open_channel message, so from its perspective no channel exists.
10713         assert_eq!(nodes[1].node.list_channels().len(), 0);
10714
10715         // The peers now reconnect
10716         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
10717                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
10718         }, true).unwrap();
10719         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
10720                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
10721         }, false).unwrap();
10722
10723         // Make sure the SendOpenChannel message is added to node 0's pending message events.
10724         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
10725         assert_eq!(msg_events.len(), 1);
10726         match &msg_events[0] {
10727                 MessageSendEvent::SendOpenChannel { msg, .. } => assert_eq!(msg, &open_channel_msg),
10728                 _ => panic!("Unexpected message."),
10729         }
10730 }
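
// Sketch (ours) of the reconnect handshake used above, with placeholder bindings `us` and
// `peer`. Each side hands the other's `Init` (carrying the peer's feature bits) to
// `peer_connected`; the trailing bool marks whether the connection is inbound from that
// node's perspective, which is our reading of the call sites in this file:
//
//         let init = msgs::Init {
//                 features: peer.node.init_features(), networks: None, remote_network_address: None
//         };
//         us.node.peer_connected(&peer.node.get_our_node_id(), &init, /* inbound */ true).unwrap();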
10731
10732 fn do_test_multi_post_event_actions(do_reload: bool) {
10733         // Tests handling multiple post-Event actions at once.
10734         // There is specific code in ChannelManager to handle channels where multiple post-Event
10735         // `ChannelMonitorUpdates` are pending at once. This test exercises that code.
10736         //
10737         // Specifically, we test calling `get_and_clear_pending_events` while there are two
10738         // `PaymentSent` events from different channels and one channel has two pending
10739         // `ChannelMonitorUpdate`s - one from an RAA and one from an inbound commitment_signed.
10740         let chanmon_cfgs = create_chanmon_cfgs(3);
10741         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10742         let (persister, chain_monitor);
10743         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
10744         let nodes_0_deserialized;
10745         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10746
10747         let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
10748         let chan_id_2 = create_announced_chan_between_nodes(&nodes, 0, 2).2;
10749
10750         send_payment(&nodes[0], &[&nodes[1]], 1_000_000);
10751         send_payment(&nodes[0], &[&nodes[2]], 1_000_000);
10752
10753         let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
10754         let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000);
10755
10756         nodes[1].node.claim_funds(our_payment_preimage);
10757         check_added_monitors!(nodes[1], 1);
10758         expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);
10759
10760         nodes[2].node.claim_funds(payment_preimage_2);
10761         check_added_monitors!(nodes[2], 1);
10762         expect_payment_claimed!(nodes[2], payment_hash_2, 1_000_000);
10763
10764         for dest in &[1, 2] {
10765                 let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[*dest], nodes[0].node.get_our_node_id());
10766                 nodes[0].node.handle_update_fulfill_htlc(&nodes[*dest].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
10767                 commitment_signed_dance!(nodes[0], nodes[*dest], htlc_fulfill_updates.commitment_signed, false);
10768                 check_added_monitors(&nodes[0], 0);
10769         }
10770
10771         let (route, payment_hash_3, _, payment_secret_3) =
10772                 get_route_and_payment_hash!(nodes[1], nodes[0], 100_000);
10773         let payment_id = PaymentId(payment_hash_3.0);
10774         nodes[1].node.send_payment_with_route(&route, payment_hash_3,
10775                 RecipientOnionFields::secret_only(payment_secret_3), payment_id).unwrap();
10776         check_added_monitors(&nodes[1], 1);
10777
10778         let send_event = SendEvent::from_node(&nodes[1]);
10779         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
10780         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event.commitment_msg);
10781         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
10782
10783         if do_reload {
10784                 let nodes_0_serialized = nodes[0].node.encode();
10785                 let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
10786                 let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_2).encode();
10787                 reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, chain_monitor, nodes_0_deserialized);
10788
10789                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10790                 nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
10791
10792                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
10793                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[2]));
10794         }
10795
10796         let events = nodes[0].node.get_and_clear_pending_events();
10797         assert_eq!(events.len(), 4);
10798         if let Event::PaymentSent { payment_preimage, .. } = events[0] {
10799                 assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
10800         } else { panic!(); }
10801         if let Event::PaymentSent { payment_preimage, .. } = events[1] {
10802                 assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
10803         } else { panic!(); }
10804         if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
10805         if let Event::PaymentPathSuccessful { .. } = events[3] {} else { panic!(); }
10806
10807         // After the events are processed, the ChannelMonitorUpdates will be released and, upon their
10808         // completion, we'll respond to nodes[1] with an RAA + CS.
10809         get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
10810         check_added_monitors(&nodes[0], 3);
10811 }
10812
10813 #[test]
10814 fn test_multi_post_event_actions() {
10815         do_test_multi_post_event_actions(true);
10816         do_test_multi_post_event_actions(false);
10817 }
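
// A stricter check than the pairwise asserts above, as a sketch (ours) over the `events`
// vector drained in `do_test_multi_post_event_actions`: the two `PaymentSent` preimages
// may arrive in either order, but both must be present:
//
//         let sent: Vec<_> = events.iter().filter_map(|e| match e {
//                 Event::PaymentSent { payment_preimage, .. } => Some(*payment_preimage),
//                 _ => None,
//         }).collect();
//         assert_eq!(sent.len(), 2);
//         assert!(sent.contains(&our_payment_preimage) && sent.contains(&payment_preimage_2));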
10818
10819 #[test]
10820 fn test_batch_channel_open() {
10821         let chanmon_cfgs = create_chanmon_cfgs(3);
10822         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10823         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
10824         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10825
10826         // Initiate channel opening and create the batch channel funding transaction.
10827         let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
10828                 (&nodes[1], 100_000, 0, 42, None),
10829                 (&nodes[2], 200_000, 0, 43, None),
10830         ]);
10831
10832         // Go through the funding_created and funding_signed flow with node 1.
10833         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
10834         check_added_monitors(&nodes[1], 1);
10835         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
10836
10837         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
10838         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
10839         check_added_monitors(&nodes[0], 1);
10840
10841         // The transaction should not have been broadcast before all channels are ready.
10842         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
10843
10844         // Go through the funding_created and funding_signed flow with node 2.
10845         nodes[2].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[1]);
10846         check_added_monitors(&nodes[2], 1);
10847         expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id());
10848
10849         let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
10850         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
10851         nodes[0].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &funding_signed_msg);
10852         check_added_monitors(&nodes[0], 1);
10853
10854         // The transaction should not have been broadcast before the persistence of all monitors
10855         // has completed.
10856         assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
10857         assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
10858
10859         // Complete the persistence of the monitor.
10860         nodes[0].chain_monitor.complete_sole_pending_chan_update(
10861                 &ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 1 })
10862         );
10863         let events = nodes[0].node.get_and_clear_pending_events();
10864
10865         // The funding transaction should be broadcast only now.
10866         let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
10867         assert_eq!(broadcasted_txs.len(), 1);
10868         assert_eq!(broadcasted_txs[0], tx);
10869
10870         assert_eq!(events.len(), 2);
10871         assert!(events.iter().any(|e| matches!(
10872                 *e,
10873                 crate::events::Event::ChannelPending {
10874                         ref counterparty_node_id,
10875                         ..
10876                 } if counterparty_node_id == &nodes[1].node.get_our_node_id(),
10877         )));
10878         assert!(events.iter().any(|e| matches!(
10879                 *e,
10880                 crate::events::Event::ChannelPending {
10881                         ref counterparty_node_id,
10882                         ..
10883                 } if counterparty_node_id == &nodes[2].node.get_our_node_id(),
10884         )));
10885 }
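
// The invariant exercised above, restated (ours): for a batched funding of N channels,
// the shared funding transaction is broadcast exactly once, and only after every channel
// in the batch has completed the funding_signed exchange and persisted its initial
// monitor. A single `ChannelMonitorUpdateStatus::InProgress` holds back the whole batch.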
10886
10887 #[test]
10888 fn test_close_in_funding_batch() {
10889         // This test ensures that if one of the channels in a batch closes,
10890         // the entire batch closes with it.
10891         let chanmon_cfgs = create_chanmon_cfgs(3);
10892         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10893         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
10894         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10895
10896         // Initiate channel opening and create the batch channel funding transaction.
10897         let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
10898                 (&nodes[1], 100_000, 0, 42, None),
10899                 (&nodes[2], 200_000, 0, 43, None),
10900         ]);
10901
10902         // Go through the funding_created and funding_signed flow with node 1.
10903         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
10904         check_added_monitors(&nodes[1], 1);
10905         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
10906
10907         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
10908         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
10909         check_added_monitors(&nodes[0], 1);
10910
10911         // The transaction should not have been broadcast before all channels are ready.
10912         assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
10913
10914         // Force-close the channel for which we've completed the initial monitor.
10915         let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
10916         let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
10917         let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
10918         let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
10919
10920         nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
10921
10922         // The monitor for the force-closed channel should be marked closed.
10923         check_added_monitors(&nodes[0], 1);
10924         {
10925                 let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
10926                 let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
10927                 assert_eq!(monitor_updates_1.len(), 1);
10928                 assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
10929         }
10930
10931         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
10932         match msg_events[0] {
10933                 MessageSendEvent::HandleError { .. } => (),
10934                 _ => panic!("Unexpected message."),
10935         }
10936
10937         // We broadcast the commitment transaction as part of the force-close.
10938         {
10939                 let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
10940                 assert_eq!(broadcasted_txs.len(), 1);
10941                 assert!(broadcasted_txs[0].txid() != tx.txid());
10942                 assert_eq!(broadcasted_txs[0].input.len(), 1);
10943                 assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
10944         }
10945
10946         // All channels in the batch should close immediately.
10947         check_closed_events(&nodes[0], &[
10948                 ExpectedCloseEvent {
10949                         channel_id: Some(channel_id_1),
10950                         discard_funding: true,
10951                         channel_funding_txo: Some(funding_txo_1),
10952                         user_channel_id: Some(42),
10953                         ..Default::default()
10954                 },
10955                 ExpectedCloseEvent {
10956                         channel_id: Some(channel_id_2),
10957                         discard_funding: true,
10958                         channel_funding_txo: Some(funding_txo_2),
10959                         user_channel_id: Some(43),
10960                         ..Default::default()
10961                 },
10962         ]);
10963
10964         // Ensure the channels don't exist anymore.
10965         assert!(nodes[0].node.list_channels().is_empty());
10966 }
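
// Rationale note (ours): the batch shares one funding transaction, so aborting any one
// channel invalidates that transaction for its siblings. Every channel in the batch is
// therefore closed with `discard_funding` set, and the funding is never broadcast.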
10967
10968 #[test]
10969 fn test_batch_funding_close_after_funding_signed() {
10970         let chanmon_cfgs = create_chanmon_cfgs(3);
10971         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10972         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
10973         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10974
10975         // Initiate channel opening and create the batch channel funding transaction.
10976         let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
10977                 (&nodes[1], 100_000, 0, 42, None),
10978                 (&nodes[2], 200_000, 0, 43, None),
10979         ]);
10980
10981         // Go through the funding_created and funding_signed flow with node 1.
10982         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
10983         check_added_monitors(&nodes[1], 1);
10984         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
10985
10986         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
10987         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
10988         check_added_monitors(&nodes[0], 1);
10989
10990         // Go through the funding_created and funding_signed flow with node 2.
10991         nodes[2].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[1]);
10992         check_added_monitors(&nodes[2], 1);
10993         expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id());
10994
10995         let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
10996         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
10997         nodes[0].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &funding_signed_msg);
10998         check_added_monitors(&nodes[0], 1);
10999
11000         // The transaction should not have been broadcast before all channels are ready.
11001         assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
11002
11003         // Force-close the channel for which we've completed the initial monitor.
11004         let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
11005         let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
11006         let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
11007         let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
11008         nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
11009         check_added_monitors(&nodes[0], 2);
11010         {
11011                 let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
11012                 let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
11013                 assert_eq!(monitor_updates_1.len(), 1);
11014                 assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
11015                 let monitor_updates_2 = monitor_updates.get(&channel_id_2).unwrap();
11016                 assert_eq!(monitor_updates_2.len(), 1);
11017                 assert_eq!(monitor_updates_2[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
11018         }
11019         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
11020         match msg_events[0] {
11021                 MessageSendEvent::HandleError { .. } => (),
11022                 _ => panic!("Unexpected message."),
11023         }
11024
11025         // We broadcast the commitment transaction as part of the force-close.
11026         {
11027                 let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
11028                 assert_eq!(broadcasted_txs.len(), 1);
11029                 assert!(broadcasted_txs[0].txid() != tx.txid());
11030                 assert_eq!(broadcasted_txs[0].input.len(), 1);
11031                 assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
11032         }
11033
11034         // All channels in the batch should close immediately.
11035         check_closed_events(&nodes[0], &[
11036                 ExpectedCloseEvent {
11037                         channel_id: Some(channel_id_1),
11038                         discard_funding: true,
11039                         channel_funding_txo: Some(funding_txo_1),
11040                         user_channel_id: Some(42),
11041                         ..Default::default()
11042                 },
11043                 ExpectedCloseEvent {
11044                         channel_id: Some(channel_id_2),
11045                         discard_funding: true,
11046                         channel_funding_txo: Some(funding_txo_2),
11047                         user_channel_id: Some(43),
11048                         ..Default::default()
11049                 },
11050         ]);
11051
11052         // Ensure the channels don't exist anymore.
11053         assert!(nodes[0].node.list_channels().is_empty());
11054 }
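
// Note (ours): unlike `test_close_in_funding_batch`, both channels here have completed
// the funding_signed exchange, so the force-close produces a closed-channel monitor
// update for each of them - hence the `check_added_monitors(&nodes[0], 2)` above.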
11055
11056 fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitment: bool) {
11057         // Tests that a node will forget the channel (when it only requires 1 confirmation) if the
11058         // funding and commitment transactions confirm in the same block.
11059         let chanmon_cfgs = create_chanmon_cfgs(2);
11060         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
11061         let mut min_depth_1_block_cfg = test_default_channel_config();
11062         min_depth_1_block_cfg.channel_handshake_config.minimum_depth = 1;
11063         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(min_depth_1_block_cfg), Some(min_depth_1_block_cfg)]);
11064         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
11065
11066         let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
11067         let chan_id = ChannelId::v1_from_funding_outpoint(chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 });
11068
11069         assert_eq!(nodes[0].node.list_channels().len(), 1);
11070         assert_eq!(nodes[1].node.list_channels().len(), 1);
11071
11072         let (closing_node, other_node) = if confirm_remote_commitment {
11073                 (&nodes[1], &nodes[0])
11074         } else {
11075                 (&nodes[0], &nodes[1])
11076         };
11077
11078         closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node.node.get_our_node_id()).unwrap();
11079         let mut msg_events = closing_node.node.get_and_clear_pending_msg_events();
11080         assert_eq!(msg_events.len(), 1);
11081         match msg_events.pop().unwrap() {
11082                 MessageSendEvent::HandleError { action: msgs::ErrorAction::DisconnectPeer { .. }, .. } => {},
11083                 _ => panic!("Unexpected event"),
11084         }
11085         check_added_monitors(closing_node, 1);
11086         check_closed_event(closing_node, 1, ClosureReason::HolderForceClosed, false, &[other_node.node.get_our_node_id()], 1_000_000);
11087
11088         let commitment_tx = {
11089                 let mut txn = closing_node.tx_broadcaster.txn_broadcast();
11090                 assert_eq!(txn.len(), 1);
11091                 let commitment_tx = txn.pop().unwrap();
11092                 check_spends!(commitment_tx, funding_tx);
11093                 commitment_tx
11094         };
11095
11096         mine_transactions(&nodes[0], &[&funding_tx, &commitment_tx]);
11097         mine_transactions(&nodes[1], &[&funding_tx, &commitment_tx]);
11098
11099         check_closed_broadcast(other_node, 1, true);
11100         check_added_monitors(other_node, 1);
11101         check_closed_event(other_node, 1, ClosureReason::CommitmentTxConfirmed, false, &[closing_node.node.get_our_node_id()], 1_000_000);
11102
11103         assert!(nodes[0].node.list_channels().is_empty());
11104         assert!(nodes[1].node.list_channels().is_empty());
11105 }
11106
11107 #[test]
11108 fn test_funding_and_commitment_tx_confirm_same_block() {
11109         do_test_funding_and_commitment_tx_confirm_same_block(false);
11110         do_test_funding_and_commitment_tx_confirm_same_block(true);
11111 }
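
// Sketch (ours) of the key step in the test above: with `minimum_depth` set to 1,
// confirming the funding transaction and the commitment transaction that spends it in
// the same block funds and closes the channel in a single connect:
//
//         mine_transactions(&nodes[1], &[&funding_tx, &commitment_tx]);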
11112
11113 #[test]
11114 fn test_accept_inbound_channel_errors_queued() {
11115         // Tests that, for manually accepted inbound channels, an error on acceptance is handled
11116         // correctly and the resulting error message is queued for the initiator, failing the channel.
11117         let mut config0 = test_default_channel_config();
11118         let mut config1 = config0.clone();
11119         config1.channel_handshake_limits.their_to_self_delay = 1000;
11120         config1.manually_accept_inbound_channels = true;
11121         config0.channel_handshake_config.our_to_self_delay = 2000;
11122
11123         let chanmon_cfgs = create_chanmon_cfgs(2);
11124         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
11125         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config0), Some(config1)]);
11126         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
11127
11128         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
11129         let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
11130
11131         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
11132         let events = nodes[1].node.get_and_clear_pending_events();
11133         match events[0] {
11134                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
11135                         match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23) {
11136                                 Err(APIError::ChannelUnavailable { err: _ }) => (),
11137                                 _ => panic!(),
11138                         }
11139                 }
11140                 _ => panic!("Unexpected event"),
11141         }
11142         assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
11143                 open_channel_msg.common_fields.temporary_channel_id);
11144 }
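
// Sketch (ours) of the config relationship that forces the failure above: the opener
// demands a longer to_self_delay than the acceptor is willing to tolerate, so manual
// acceptance must fail and the resulting error message is queued for the initiator:
//
//         config0.channel_handshake_config.our_to_self_delay = 2000;   // what node 0 demands
//         config1.channel_handshake_limits.their_to_self_delay = 1000; // node 1's tolerance cap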