// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Tests that stand up a network of ChannelManagers, create channels, send payments/messages
//! between them, and often check that the resulting ChannelMonitors are able to claim outputs
//! on-chain.

use crate::chain;
use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
use crate::chain::chaininterface::LowerBoundedFeeEstimator;
use crate::chain::channelmonitor;
use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use crate::chain::transaction::OutPoint;
use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
use crate::ln::types::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
use crate::ln::{chan_utils, onion_utils};
use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
use crate::routing::gossip::{NetworkGraph, NetworkUpdate};
use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route, RouteParameters};
use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use crate::util::test_channel_signer::TestChannelSigner;
use crate::util::test_utils::{self, WatchtowerPersister};
use crate::util::errors::APIError;
use crate::util::ser::{Writeable, ReadableArgs};
use crate::util::string::UntrustedString;
use crate::util::config::{UserConfig, MaxDustHTLCExposure};

use bitcoin::hash_types::BlockHash;
use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::blockdata::script::{Builder, ScriptBuf};
use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::network::constants::Network;
use bitcoin::{Sequence, Transaction, TxIn, TxOut, Witness};
use bitcoin::OutPoint as BitcoinOutPoint;

use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::{PublicKey, SecretKey};

use crate::io;
use crate::prelude::*;
use alloc::collections::BTreeSet;
use core::iter::repeat;
use bitcoin::hashes::Hash;
use crate::sync::{Arc, Mutex, RwLock};

use crate::ln::functional_test_utils::*;
use crate::ln::chan_utils::CommitmentTransaction;

use super::channel::UNFUNDED_CHANNEL_AGE_LIMIT_TICKS;

#[test]
fn test_channel_resumption_fail_post_funding() {
        // If we fail to exchange funding with a peer prior to it disconnecting we'll resume the
        // channel open on reconnect; however, if we do exchange funding we do not currently
        // support replaying it, so here we test that the channel closes.
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 0, 42, None, None).unwrap();
        let open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan);
        let accept_chan = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan);

        let (temp_chan_id, tx, funding_output) =
                create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
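        // Once the funding outpoint is known the channel moves from its temporary ID to the v1
        // channel ID derived from that outpoint.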
        let new_chan_id = ChannelId::v1_from_funding_outpoint(funding_output);
        nodes[0].node.funding_transaction_generated(&temp_chan_id, &nodes[1].node.get_our_node_id(), tx).unwrap();

        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(new_chan_id, true, ClosureReason::DisconnectedPeer)]);

        // After ddf75afd16 we'd panic on reconnection if we exchanged funding info, so test that
        // explicitly here.
        nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
                features: nodes[1].node.init_features(), networks: None, remote_network_address: None
        }, true).unwrap();
        assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new());
}

#[test]
fn test_insane_channel_opens() {
        // Stand up a network of 2 nodes
        use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS;
        let mut cfg = UserConfig::default();
        cfg.channel_handshake_limits.max_funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1;
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(cfg)]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

        // Instantiate channel parameters where we push the maximum msats given our
        // funding satoshis
        let channel_value_sat = 31337; // same as funding satoshis
        let channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg);
        let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;

        // Have node0 initiate a channel to node1 with aforementioned parameters
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_sat, push_msat, 42, None, None).unwrap();

        // Extract the channel open message from node0 to node1
        let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

        // Test helper that asserts we get the correct error string given a mutator
        // that supposedly makes the channel open message insane
        let insane_open_helper = |expected_error_str: &str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
                nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &message_mutator(open_channel_message.clone()));
                let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
                assert_eq!(msg_events.len(), 1);
                let expected_regex = regex::Regex::new(expected_error_str).unwrap();
                if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
                        match action {
                                &ErrorAction::SendErrorMessage { .. } => {
                                        nodes[1].logger.assert_log_regex("lightning::ln::channelmanager", expected_regex, 1);
                                },
                                _ => panic!("unexpected event!"),
                        }
                } else { assert!(false); }
        };

        use crate::ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;

        // Test all mutations that would make the channel open message insane
        insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
        insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });

        insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.common_fields.funding_satoshis + 1; msg });

        insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });

        insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.common_fields.dust_limit_satoshis = msg.common_fields.funding_satoshis + 1; msg });

        insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.common_fields.htlc_minimum_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });

        insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.common_fields.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });

        insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.common_fields.max_accepted_htlcs = 0; msg });

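        // 483 is the BOLT 2 protocol maximum for max_accepted_htlcs.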
        insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.common_fields.max_accepted_htlcs = 484; msg });
}

#[test]
fn test_funding_exceeds_no_wumbo_limit() {
        // Test that if a peer does not support wumbo channels, we'll refuse to open a wumbo channel to
        // them.
        use crate::ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO;
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        *node_cfgs[1].override_init_features.borrow_mut() = Some(channelmanager::provided_init_features(&test_default_channel_config()).clear_wumbo());
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

        match nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None, None) {
                Err(APIError::APIMisuseError { err }) => {
                        assert_eq!(format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1), err);
                },
                _ => panic!()
        }
}

fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
        // A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure,
        // but only for them. Because some LSPs do it with some level of trust of the clients (for a
        // substantial UX improvement), we explicitly allow it. Because it's unlikely to happen often
        // in normal testing, we test it explicitly here.
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let default_config = UserConfig::default();

        // Have node0 initiate a channel to node1, pushing the maximum it can afford once the
        // commitment tx fee and node1's selected channel reserve are accounted for.
        let mut push_amt = 100_000_000;
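        // 253 sat/kW is the feerate floor (just over 1 sat/vbyte once converted from weight units).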
        let feerate_per_kw = 253;
        let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
        push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(&channel_type_features) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
        push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
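        // For reference: assuming the usual 724-WU base commitment weight and 172 WU per HTLC for
        // `only_static_remote_key` channels, the fee deduction above is
        // 253 * (724 + 4 * 172) / 1000 * 1000 = 357_000 msat, and the 1% (min 1_000 sat)
        // holder-selected reserve deducts another 1_000_000 msat, leaving push_amt at
        // 98_643_000 msat.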

        let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None, None).unwrap();
        let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        if !send_from_initiator {
                open_channel_message.channel_reserve_satoshis = 0;
                open_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000;
        }
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);

        // Extract the channel accept message from node1 to node0
        let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
        if send_from_initiator {
                accept_channel_message.channel_reserve_satoshis = 0;
                accept_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000;
        }
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
        {
                let sender_node = if send_from_initiator { &nodes[1] } else { &nodes[0] };
                let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
                let mut sender_node_per_peer_lock;
                let mut sender_node_peer_state_lock;

                let channel_phase = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
                match channel_phase {
                        ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
                                let chan_context = channel_phase.context_mut();
                                chan_context.holder_selected_channel_reserve_satoshis = 0;
                                chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000;
                        },
                        _ => assert!(false),
                }
        }

        let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id);
        let funding_msgs = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx);
        create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_msgs.0);

        // nodes[0] should now be able to send the full balance to nodes[1], violating nodes[1]'s
        // security model if it ever tries to send funds back to nodes[0] (but that's not our problem).
        if send_from_initiator {
                send_payment(&nodes[0], &[&nodes[1]], 100_000_000
                        // Note that for outbound channels we have to consider the commitment tx fee and the
                        // "fee spike buffer", which is currently a multiple of the total commitment tx fee as
                        // well as an additional HTLC.
                        - FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, &channel_type_features));
        } else {
                send_payment(&nodes[1], &[&nodes[0]], push_amt);
        }
}

#[test]
fn test_counterparty_no_reserve() {
        do_test_counterparty_no_reserve(true);
        do_test_counterparty_no_reserve(false);
}

#[test]
fn test_async_inbound_update_fee() {
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1);

        // balancing
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);

        // A                                        B
        // update_fee                            ->
        // send (1) commitment_signed            -.
        //                                       <- update_add_htlc/commitment_signed
        // send (2) RAA (awaiting remote revoke) -.
        // (1) commitment_signed is delivered    ->
        //                                       .- send (3) RAA (awaiting remote revoke)
        // (2) RAA is delivered                  ->
        //                                       .- send (4) commitment_signed
        //                                       <- (3) RAA is delivered
        // send (5) commitment_signed            -.
        //                                       <- (4) commitment_signed is delivered
        // send (6) RAA                          -.
        // (5) commitment_signed is delivered    ->
        //                                       <- RAA
        // (6) RAA is delivered                  ->

        // First nodes[0] generates an update_fee
        {
                let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
                *feerate_lock += 20;
        }
        nodes[0].node.timer_tick_occurred();
        check_added_monitors!(nodes[0], 1);

        let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events_0.len(), 1);
        let (update_msg, commitment_signed) = match events_0[0] { // (1)
                MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
                        (update_fee.as_ref(), commitment_signed)
                },
                _ => panic!("Unexpected event"),
        };

        nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

        // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000);
        nodes[1].node.send_payment_with_route(&route, our_payment_hash,
                RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
        check_added_monitors!(nodes[1], 1);

        let payment_event = {
                let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
                assert_eq!(events_1.len(), 1);
                SendEvent::from_event(events_1.remove(0))
        };
        assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
        assert_eq!(payment_event.msgs.len(), 1);

        // ...now when the messages get delivered everyone should be happy
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
        let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
        // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
        check_added_monitors!(nodes[0], 1);

        // deliver (1), generate (3):
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
        let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
        // nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes
        check_added_monitors!(nodes[1], 1);

        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack); // deliver (2)
        let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert!(bs_update.update_add_htlcs.is_empty()); // (4)
        assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4)
        assert!(bs_update.update_fail_htlcs.is_empty()); // (4)
        assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4)
        assert!(bs_update.update_fee.is_none()); // (4)
        check_added_monitors!(nodes[1], 1);

        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); // deliver (3)
        let as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
        assert!(as_update.update_add_htlcs.is_empty()); // (5)
        assert!(as_update.update_fulfill_htlcs.is_empty()); // (5)
        assert!(as_update.update_fail_htlcs.is_empty()); // (5)
        assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5)
        assert!(as_update.update_fee.is_none()); // (5)
        check_added_monitors!(nodes[0], 1);

        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed); // deliver (4)
        let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
        // only (6) so get_event_msg's assert(len == 1) passes
        check_added_monitors!(nodes[0], 1);

        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.commitment_signed); // deliver (5)
        let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
        check_added_monitors!(nodes[1], 1);

        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
        check_added_monitors!(nodes[0], 1);

        let events_2 = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events_2.len(), 1);
        match events_2[0] {
                Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment
                _ => panic!("Unexpected event"),
        }

        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke); // deliver (6)
        check_added_monitors!(nodes[1], 1);
}

#[test]
fn test_update_fee_unordered_raa() {
        // Just the intro to the previous test followed by an out-of-order RAA (which caused a
        // crash in an earlier version of the update_fee patch)
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1);

        // balancing
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);

        // First nodes[0] generates an update_fee
        {
                let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
                *feerate_lock += 20;
        }
        nodes[0].node.timer_tick_occurred();
        check_added_monitors!(nodes[0], 1);

        let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events_0.len(), 1);
        let update_msg = match events_0[0] { // (1)
                MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
                        update_fee.as_ref()
                },
                _ => panic!("Unexpected event"),
        };

        nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

        // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000);
        nodes[1].node.send_payment_with_route(&route, our_payment_hash,
                RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
        check_added_monitors!(nodes[1], 1);

        let payment_event = {
                let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
                assert_eq!(events_1.len(), 1);
                SendEvent::from_event(events_1.remove(0))
        };
        assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
        assert_eq!(payment_event.msgs.len(), 1);

        // ...now when the messages get delivered everyone should be happy
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
        let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
        // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
        check_added_monitors!(nodes[0], 1);

        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg); // deliver (2)
        check_added_monitors!(nodes[1], 1);

        // We can't continue, sadly, because our (1) now has a bogus signature
}

#[test]
fn test_multi_flight_update_fee() {
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1);

        // A                                        B
        // update_fee/commitment_signed          ->
        //                                       .- send (1) RAA and (2) commitment_signed
        // update_fee (never committed)          ->
        // (3) update_fee                        ->
        // We have to manually generate the above update_fee; it is allowed by the protocol, but we
        // don't track which updates correspond to which revoke_and_ack responses, so we're in
        // AwaitingRAA mode and will not generate the update_fee yet.
        //                                       <- (1) RAA delivered
        // (3) is generated and send (4) CS      -.
        // Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't
        // know the per_commitment_point to use for it.
        //                                       <- (2) commitment_signed delivered
        // revoke_and_ack                        ->
        //                                          B should send no response here
        // (4) commitment_signed delivered       ->
        //                                       <- RAA/commitment_signed delivered
        // revoke_and_ack                        ->

        // First nodes[0] generates an update_fee
        let initial_feerate;
        {
                let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
                initial_feerate = *feerate_lock;
                *feerate_lock = initial_feerate + 20;
        }
        nodes[0].node.timer_tick_occurred();
        check_added_monitors!(nodes[0], 1);

        let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events_0.len(), 1);
        let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1)
                MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
                        (update_fee.as_ref().unwrap(), commitment_signed)
                },
                _ => panic!("Unexpected event"),
        };

        // Deliver first update_fee/commitment_signed pair, generating (1) and (2):
        nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1);
        let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        check_added_monitors!(nodes[1], 1);

        // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment
        // transaction:
        {
                let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
                *feerate_lock = initial_feerate + 40;
        }
        nodes[0].node.timer_tick_occurred();
        assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

        // Manually create the (3) update_fee message nodes[0] will eventually generate, before it
        // actually does so...
        let mut update_msg_2 = msgs::UpdateFee {
                channel_id: update_msg_1.channel_id.clone(),
                feerate_per_kw: (initial_feerate + 30) as u32,
        };

        nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);

        update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32;
        // Deliver (3)
        nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);

        // Deliver (1), generating (3) and (4)
        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg);
        let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
        check_added_monitors!(nodes[0], 1);
        assert!(as_second_update.update_add_htlcs.is_empty());
        assert!(as_second_update.update_fulfill_htlcs.is_empty());
        assert!(as_second_update.update_fail_htlcs.is_empty());
        assert!(as_second_update.update_fail_malformed_htlcs.is_empty());
        // Check that the update_fee newly generated matches what we delivered:
        assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id);
        assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw);

        // Deliver (2) commitment_signed
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
        let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
        check_added_monitors!(nodes[0], 1);
        // No commitment_signed so get_event_msg's assert(len == 1) passes

        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 1);

        // Deliver (4)
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
        let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        check_added_monitors!(nodes[1], 1);

        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[0], 1);

        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment);
        let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
        // No commitment_signed so get_event_msg's assert(len == 1) passes
        check_added_monitors!(nodes[0], 1);

        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 1);
}

fn do_test_sanity_on_in_flight_opens(steps: u8) {
        // Previously, we had issues deserializing channels when we hadn't connected the first block
        // after creation. To catch that and similar issues, we lean on the Node::drop impl to test
        // serialization round-trips and simply do steps towards opening a channel and then drop the
        // Node objects.
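        //
        // The low nibble of `steps` selects how far through the open flow to get before dropping
        // the nodes; the high bit selects whether a dummy block is connected first.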

        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

        if steps & 0b1000_0000 != 0 {
                let block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
                connect_block(&nodes[0], &block);
                connect_block(&nodes[1], &block);
        }

        if steps & 0x0f == 0 { return; }
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
        let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

        if steps & 0x0f == 1 { return; }
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
        let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());

        if steps & 0x0f == 2 { return; }
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);

        let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);

        if steps & 0x0f == 3 { return; }
        nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
        check_added_monitors!(nodes[0], 0);
        let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());

        if steps & 0x0f == 4 { return; }
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
        {
                let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
                assert_eq!(added_monitors.len(), 1);
                assert_eq!(added_monitors[0].0, funding_output);
                added_monitors.clear();
        }
        expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

        let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());

        if steps & 0x0f == 5 { return; }
        nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
        {
                let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
                assert_eq!(added_monitors.len(), 1);
                assert_eq!(added_monitors[0].0, funding_output);
                added_monitors.clear();
        }

        expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
        let events_4 = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events_4.len(), 0);

        if steps & 0x0f == 6 { return; }
        create_chan_between_nodes_with_value_confirm_first(&nodes[0], &nodes[1], &tx, 2);

        if steps & 0x0f == 7 { return; }
        confirm_transaction_at(&nodes[0], &tx, 2);
        connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
        create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
        expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
}

#[test]
fn test_sanity_on_in_flight_opens() {
        do_test_sanity_on_in_flight_opens(0);
        do_test_sanity_on_in_flight_opens(0 | 0b1000_0000);
        do_test_sanity_on_in_flight_opens(1);
        do_test_sanity_on_in_flight_opens(1 | 0b1000_0000);
        do_test_sanity_on_in_flight_opens(2);
        do_test_sanity_on_in_flight_opens(2 | 0b1000_0000);
        do_test_sanity_on_in_flight_opens(3);
        do_test_sanity_on_in_flight_opens(3 | 0b1000_0000);
        do_test_sanity_on_in_flight_opens(4);
        do_test_sanity_on_in_flight_opens(4 | 0b1000_0000);
        do_test_sanity_on_in_flight_opens(5);
        do_test_sanity_on_in_flight_opens(5 | 0b1000_0000);
        do_test_sanity_on_in_flight_opens(6);
        do_test_sanity_on_in_flight_opens(6 | 0b1000_0000);
        do_test_sanity_on_in_flight_opens(7);
        do_test_sanity_on_in_flight_opens(7 | 0b1000_0000);
        do_test_sanity_on_in_flight_opens(8);
        do_test_sanity_on_in_flight_opens(8 | 0b1000_0000);
}

#[test]
fn test_update_fee_vanilla() {
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1);

        {
                let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
                *feerate_lock += 25;
        }
        nodes[0].node.timer_tick_occurred();
        check_added_monitors!(nodes[0], 1);

        let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events_0.len(), 1);
        let (update_msg, commitment_signed) = match events_0[0] {
                MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs: _, update_fulfill_htlcs: _, update_fail_htlcs: _, update_fail_malformed_htlcs: _, ref update_fee, ref commitment_signed } } => {
                        (update_fee.as_ref(), commitment_signed)
                },
                _ => panic!("Unexpected event"),
        };
        nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());

        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
        let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        check_added_monitors!(nodes[1], 1);

        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[0], 1);

        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
        let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
        // No commitment_signed so get_event_msg's assert(len == 1) passes
        check_added_monitors!(nodes[0], 1);

        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 1);
}

#[test]
fn test_update_fee_that_funder_cannot_afford() {
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let channel_value = 5000;
        let push_sats = 700;
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000);
        let channel_id = chan.2;
        let secp_ctx = Secp256k1::new();
        let default_config = UserConfig::default();
        let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);

        let channel_type_features = ChannelTypeFeatures::only_static_remote_key();

        // Calculate the maximum feerate that A can afford. Note that A won't send an update_fee
        // unless it can afford the new feerate with a buffer of CONCURRENT_INBOUND_HTLC_FEE_BUFFER
        // additional HTLCs, so we calculate two different feerates here - the expected local limit
        // (with the buffer) as well as the expected remote limit (without it).
        let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(&channel_type_features) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32;
        let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(&channel_type_features)) as u32;
        {
                let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
                *feerate_lock = feerate;
        }
        nodes[0].node.timer_tick_occurred();
        check_added_monitors!(nodes[0], 1);
        let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());

        nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg.update_fee.unwrap());

        commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false);

        // Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate set above.
        {
                let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone();

                // We made sure neither party's funds are below the dust limit and there are no HTLCs here
                assert_eq!(commitment_tx.output.len(), 2);
                let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000;
                let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value);
                actual_fee = channel_value - actual_fee;
                assert_eq!(total_fee, actual_fee);
        }

        {
                // Increment the feerate by a small constant, accounting for rounding errors
                let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
                *feerate_lock += 4;
        }
        nodes[0].node.timer_tick_occurred();
        nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
        check_added_monitors!(nodes[0], 0);

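        // Commitment transaction numbers count down from 2^48 - 1; 281474976710654 is 2^48 - 2.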
        const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;

        // Get the TestChannelSigner for each channel, which will be used to (1) get the keys
        // needed to sign the new commitment tx and (2) sign the new commitment tx.
        let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
                        |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
                ).flatten().unwrap();
                let chan_signer = local_chan.get_signer();
                let pubkeys = chan_signer.as_ref().pubkeys();
                (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
                 pubkeys.funding_pubkey)
        };
        let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
                let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
                let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
                        |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
                ).flatten().unwrap();
                let chan_signer = remote_chan.get_signer();
                let pubkeys = chan_signer.as_ref().pubkeys();
                (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
                 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
                 pubkeys.funding_pubkey)
        };

        // Assemble the set of keys we can use for signatures for our commitment_signed message.
        let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
                &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);

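        // Hand-build a commitment transaction at the new feerate, as the counterparty would see it,
        // and sign it directly with the channel signer, bypassing nodes[0]'s affordability checks.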
        let res = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
                        |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
                ).flatten().unwrap();
                let local_chan_signer = local_chan.get_signer();
                let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![];
                let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
                        INITIAL_COMMITMENT_NUMBER - 1,
                        push_sats,
                        channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000,
                        local_funding, remote_funding,
                        commit_tx_keys.clone(),
                        non_buffer_feerate + 4,
                        &mut htlcs,
                        &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
                );
                local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), Vec::new(), &secp_ctx).unwrap()
        };

        let commit_signed_msg = msgs::CommitmentSigned {
                channel_id: chan.2,
                signature: res.0,
                htlc_signatures: res.1,
                #[cfg(taproot)]
                partial_signature_with_nonce: None,
        };

        let update_fee = msgs::UpdateFee {
                channel_id: chan.2,
                feerate_per_kw: non_buffer_feerate + 4,
        };

        nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_fee);

        // While producing the commitment_signed response after handling the received update_fee
        // request, the check that the funder (who sent the update_fee request) can afford the new
        // fee (funder_balance >= fee + channel_reserve) should produce an error.
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
        nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee", 3);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
        check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") },
                [nodes[0].node.get_our_node_id()], channel_value);
}

#[test]
fn test_update_fee_with_fundee_update_add_htlc() {
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

        // balancing
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);

        {
                let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
                *feerate_lock += 20;
        }
        nodes[0].node.timer_tick_occurred();
        check_added_monitors!(nodes[0], 1);

        let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events_0.len(), 1);
        let (update_msg, commitment_signed) = match events_0[0] {
                MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs: _, update_fulfill_htlcs: _, update_fail_htlcs: _, update_fail_malformed_htlcs: _, ref update_fee, ref commitment_signed } } => {
                        (update_fee.as_ref(), commitment_signed)
                },
                _ => panic!("Unexpected event"),
        };
        nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
        let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        check_added_monitors!(nodes[1], 1);

        let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 800000);

        // nothing happens since node[1] is in AwaitingRemoteRevoke
        nodes[1].node.send_payment_with_route(&route, our_payment_hash,
                RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
        {
                let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
                assert_eq!(added_monitors.len(), 0);
                added_monitors.clear();
        }
        assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        // node[1] has nothing to do

        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[0], 1);

        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
        let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
        // No commitment_signed so get_event_msg's assert(len == 1) passes
        check_added_monitors!(nodes[0], 1);
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
        check_added_monitors!(nodes[1], 1);
        // AwaitingRemoteRevoke ends here

        let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert_eq!(commitment_update.update_add_htlcs.len(), 1);
        assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0);
        assert_eq!(commitment_update.update_fail_htlcs.len(), 0);
        assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0);
        assert!(commitment_update.update_fee.is_none());

        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]);
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
        check_added_monitors!(nodes[0], 1);
        let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());

        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
        check_added_monitors!(nodes[1], 1);
        let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
        // No commitment_signed so get_event_msg's assert(len == 1) passes

        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke);
        check_added_monitors!(nodes[0], 1);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

        expect_pending_htlcs_forwardable!(nodes[0]);

        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
                Event::PaymentClaimable { .. } => { },
                _ => panic!("Unexpected event"),
        };

        claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage);

        send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
        check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
        check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}

907 #[test]
908 fn test_update_fee() {
909         let chanmon_cfgs = create_chanmon_cfgs(2);
910         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
911         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
912         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
913         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
914         let channel_id = chan.2;
915
916         // A                                        B
917         // (1) update_fee/commitment_signed      ->
918         //                                       <- (2) revoke_and_ack
919         //                                       .- send (3) commitment_signed
920         // (4) update_fee/commitment_signed      ->
921         //                                       .- send (5) revoke_and_ack (no CS as we're awaiting a revoke)
922         //                                       <- (3) commitment_signed delivered
923         // send (6) revoke_and_ack               -.
924         //                                       <- (5) deliver revoke_and_ack
925         // (6) deliver revoke_and_ack            ->
926         //                                       .- send (7) commitment_signed in response to (4)
927         //                                       <- (7) deliver commitment_signed
928         // revoke_and_ack                        ->
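	//
	// Note that the second update_fee (4) goes out before the first dance completes. Both sides
	// must nonetheless converge on the final rate, which we assert at the end (feerate + 30).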
929
930         // Create and deliver (1)...
931         let feerate;
932         {
933                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
934                 feerate = *feerate_lock;
935                 *feerate_lock = feerate + 20;
936         }
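	// ChannelManager re-checks the fee estimator on timer ticks, so bumping the estimator and
	// then ticking generates the update_fee/commitment_signed pair for (1).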
937         nodes[0].node.timer_tick_occurred();
938         check_added_monitors!(nodes[0], 1);
939
940         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
941         assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
948         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
949
950         // Generate (2) and (3):
951         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
952         let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
953         check_added_monitors!(nodes[1], 1);
954
955         // Deliver (2):
956         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
957         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
958         check_added_monitors!(nodes[0], 1);
959
960         // Create and deliver (4)...
961         {
962                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
963                 *feerate_lock = feerate + 30;
964         }
965         nodes[0].node.timer_tick_occurred();
966         check_added_monitors!(nodes[0], 1);
967         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
968         assert_eq!(events_0.len(), 1);
	let (update_msg, commitment_signed) = match events_0[0] {
		MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
			(update_fee.as_ref(), commitment_signed)
		},
		_ => panic!("Unexpected event"),
	};
975
976         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
977         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
978         check_added_monitors!(nodes[1], 1);
979         // ... creating (5)
980         let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
981         // No commitment_signed so get_event_msg's assert(len == 1) passes
982
983         // Handle (3), creating (6):
984         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0);
985         check_added_monitors!(nodes[0], 1);
986         let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
987         // No commitment_signed so get_event_msg's assert(len == 1) passes
988
989         // Deliver (5):
990         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
991         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
992         check_added_monitors!(nodes[0], 1);
993
994         // Deliver (6), creating (7):
995         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0);
996         let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
997         assert!(commitment_update.update_add_htlcs.is_empty());
998         assert!(commitment_update.update_fulfill_htlcs.is_empty());
999         assert!(commitment_update.update_fail_htlcs.is_empty());
1000         assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
1001         assert!(commitment_update.update_fee.is_none());
1002         check_added_monitors!(nodes[1], 1);
1003
1004         // Deliver (7)
1005         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
1006         check_added_monitors!(nodes[0], 1);
1007         let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1008         // No commitment_signed so get_event_msg's assert(len == 1) passes
1009
1010         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
1011         check_added_monitors!(nodes[1], 1);
1012         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1013
1014         assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30);
1015         assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30);
1016         close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
1017         check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
1018         check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
1019 }
1020
1021 #[test]
1022 fn fake_network_test() {
1023         // Simple test which builds a network of ChannelManagers, connects them to each other, and
1024         // tests that payments get routed and transactions broadcast in semi-reasonable ways.
1025         let chanmon_cfgs = create_chanmon_cfgs(4);
1026         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
1027         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
1028         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
1029
1030         // Create some initial channels
1031         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1032         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
1033         let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
1034
1035         // Rebalance the network a bit by relaying one payment through all the channels...
1036         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1037         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1038         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1039         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1040
1041         // Send some more payments
1042         send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000);
1043         send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000);
1044         send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000);
1045
1046         // Test failure packets
1047         let payment_hash_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 1000000).1;
1048         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1);
1049
	// Add a new channel that skips node 2
1051         let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3);
1052
1053         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
1054         send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
1055         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1056         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1057         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1058         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1059         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1060
1061         // Do some rebalance loop payments, simultaneously
1062         let mut hops = Vec::with_capacity(3);
1063         hops.push(RouteHop {
1064                 pubkey: nodes[2].node.get_our_node_id(),
1065                 node_features: NodeFeatures::empty(),
1066                 short_channel_id: chan_2.0.contents.short_channel_id,
1067                 channel_features: ChannelFeatures::empty(),
1068                 fee_msat: 0,
1069                 cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32,
1070                 maybe_announced_channel: true,
1071         });
1072         hops.push(RouteHop {
1073                 pubkey: nodes[3].node.get_our_node_id(),
1074                 node_features: NodeFeatures::empty(),
1075                 short_channel_id: chan_3.0.contents.short_channel_id,
1076                 channel_features: ChannelFeatures::empty(),
1077                 fee_msat: 0,
1078                 cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32,
1079                 maybe_announced_channel: true,
1080         });
1081         hops.push(RouteHop {
1082                 pubkey: nodes[1].node.get_our_node_id(),
1083                 node_features: nodes[1].node.node_features(),
1084                 short_channel_id: chan_4.0.contents.short_channel_id,
1085                 channel_features: nodes[1].node.channel_features(),
1086                 fee_msat: 1000000,
1087                 cltv_expiry_delta: TEST_FINAL_CLTV,
1088                 maybe_announced_channel: true,
1089         });
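	// Fill in the intermediate hops' fees using the BOLT 7 forwarding formula,
	//   fee_msat = fee_base_msat + amount_to_forward_msat * fee_proportional_millionths / 1_000_000,
	// working backwards from the 1_000_000 msat delivered to the final hop.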
1090         hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
1091         hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
1092         let payment_preimage_1 = send_along_route(&nodes[1],
1093                 Route { paths: vec![Path { hops, blinded_tail: None }], route_params: None },
1094                         &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
1095
1096         let mut hops = Vec::with_capacity(3);
1097         hops.push(RouteHop {
1098                 pubkey: nodes[3].node.get_our_node_id(),
1099                 node_features: NodeFeatures::empty(),
1100                 short_channel_id: chan_4.0.contents.short_channel_id,
1101                 channel_features: ChannelFeatures::empty(),
1102                 fee_msat: 0,
1103                 cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32,
1104                 maybe_announced_channel: true,
1105         });
1106         hops.push(RouteHop {
1107                 pubkey: nodes[2].node.get_our_node_id(),
1108                 node_features: NodeFeatures::empty(),
1109                 short_channel_id: chan_3.0.contents.short_channel_id,
1110                 channel_features: ChannelFeatures::empty(),
1111                 fee_msat: 0,
1112                 cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32,
1113                 maybe_announced_channel: true,
1114         });
1115         hops.push(RouteHop {
1116                 pubkey: nodes[1].node.get_our_node_id(),
1117                 node_features: nodes[1].node.node_features(),
1118                 short_channel_id: chan_2.0.contents.short_channel_id,
1119                 channel_features: nodes[1].node.channel_features(),
1120                 fee_msat: 1000000,
1121                 cltv_expiry_delta: TEST_FINAL_CLTV,
1122                 maybe_announced_channel: true,
1123         });
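	// Same backwards BOLT 7 fee computation as above, for the opposite-direction loop.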
1124         hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
1125         hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
1126         let payment_hash_2 = send_along_route(&nodes[1],
1127                 Route { paths: vec![Path { hops, blinded_tail: None }], route_params: None },
1128                         &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;
1129
1130         // Claim the rebalances...
1131         fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
1132         claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
1133
1134         // Close down the channels...
1135         close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
1136         check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
1137         check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
1138         close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
1139         check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
1140         check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
1141         close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
1142         check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
1143         check_closed_event!(nodes[3], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
1144         close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
1145         check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
1146         check_closed_event!(nodes[3], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
1147 }
1148
1149 #[test]
1150 fn holding_cell_htlc_counting() {
1151         // Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs
1152         // to ensure we don't end up with HTLCs sitting around in our holding cell for several
1153         // commitment dance rounds.
1154         let chanmon_cfgs = create_chanmon_cfgs(3);
1155         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1156         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1157         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1158         create_announced_chan_between_nodes(&nodes, 0, 1);
1159         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
1160
	// Fetch a route in advance as we will be unable to fetch one once we're unable to send.
1162         let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
1163
1164         let mut payments = Vec::new();
1165         for _ in 0..50 {
1166                 let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
1167                 nodes[1].node.send_payment_with_route(&route, payment_hash,
1168                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
1169                 payments.push((payment_preimage, payment_hash));
1170         }
1171         check_added_monitors!(nodes[1], 1);
1172
1173         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
1174         assert_eq!(events.len(), 1);
1175         let initial_payment_event = SendEvent::from_event(events.pop().unwrap());
1176         assert_eq!(initial_payment_event.node_id, nodes[2].node.get_our_node_id());
1177
1178         // There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs in
1179         // the holding cell waiting on B's RAA to send. At this point we should not be able to add
1180         // another HTLC.
1181         {
1182                 unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, payment_hash_1,
1183                                 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
1184                         ), true, APIError::ChannelUnavailable { .. }, {});
1185                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1186         }
1187
1188         // This should also be true if we try to forward a payment.
1189         let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
1190         {
1191                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1192                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1193                 check_added_monitors!(nodes[0], 1);
1194         }
1195
1196         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1197         assert_eq!(events.len(), 1);
1198         let payment_event = SendEvent::from_event(events.pop().unwrap());
1199         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1200
1201         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1202         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
	// We have to process the pending HTLC forwards twice - the first pass attempts to forward
	// the payment (and fails), the second processes the resulting failure and fails the HTLC
	// backwards.
1205         expect_pending_htlcs_forwardable!(nodes[1]);
1206         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
1207         check_added_monitors!(nodes[1], 1);
1208
1209         let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1210         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]);
1211         commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true);
1212
1213         expect_payment_failed_with_update!(nodes[0], payment_hash_2, false, chan_2.0.contents.short_channel_id, false);
1214
1215         // Now forward all the pending HTLCs and claim them back
1216         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &initial_payment_event.msgs[0]);
1217         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &initial_payment_event.commitment_msg);
1218         check_added_monitors!(nodes[2], 1);
1219
1220         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1221         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
1222         check_added_monitors!(nodes[1], 1);
1223         let as_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1224
1225         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
1226         check_added_monitors!(nodes[1], 1);
1227         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1228
	for update in as_updates.update_add_htlcs.iter() {
1230                 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update);
1231         }
1232         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_updates.commitment_signed);
1233         check_added_monitors!(nodes[2], 1);
1234         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
1235         check_added_monitors!(nodes[2], 1);
1236         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1237
1238         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
1239         check_added_monitors!(nodes[1], 1);
1240         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
1241         check_added_monitors!(nodes[1], 1);
1242         let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1243
1244         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_final_raa);
1245         check_added_monitors!(nodes[2], 1);
1246
1247         expect_pending_htlcs_forwardable!(nodes[2]);
1248
1249         let events = nodes[2].node.get_and_clear_pending_events();
1250         assert_eq!(events.len(), payments.len());
1251         for (event, &(_, ref hash)) in events.iter().zip(payments.iter()) {
1252                 match event {
1253                         &Event::PaymentClaimable { ref payment_hash, .. } => {
1254                                 assert_eq!(*payment_hash, *hash);
1255                         },
1256                         _ => panic!("Unexpected event"),
1257                 };
1258         }
1259
1260         for (preimage, _) in payments.drain(..) {
1261                 claim_payment(&nodes[1], &[&nodes[2]], preimage);
1262         }
1263
1264         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
1265 }
1266
1267 #[test]
1268 fn duplicate_htlc_test() {
	// Test that we accept duplicate-payment_hash HTLCs across the network and that
	// claiming/failing them is handled separately per HTLC and doesn't affect the others
1271         let chanmon_cfgs = create_chanmon_cfgs(6);
1272         let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
1273         let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
1274         let mut nodes = create_network(6, &node_cfgs, &node_chanmgrs);
1275
1276         // Create some initial channels to route via 3 to 4/5 from 0/1/2
1277         create_announced_chan_between_nodes(&nodes, 0, 3);
1278         create_announced_chan_between_nodes(&nodes, 1, 3);
1279         create_announced_chan_between_nodes(&nodes, 2, 3);
1280         create_announced_chan_between_nodes(&nodes, 3, 4);
1281         create_announced_chan_between_nodes(&nodes, 3, 5);
1282
1283         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
1284
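	// The test harness derives payment preimages deterministically from this shared counter, so
	// winding it back makes the next route_payment reuse the same preimage/payment_hash, giving
	// us a duplicate-hash HTLC (here and again below).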
1285         *nodes[0].network_payment_count.borrow_mut() -= 1;
1286         assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage);
1287
1288         *nodes[0].network_payment_count.borrow_mut() -= 1;
1289         assert_eq!(route_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], 1000000).0, payment_preimage);
1290
1291         claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage);
1292         fail_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], payment_hash);
1293         claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage);
1294 }
1295
1296 #[test]
	// Test that ChannelMonitor doesn't generate 2 preimage txn
	// when we have 2 HTLCs with the same preimage that go across a node
	// in opposite directions, even with the same payment secret.
1301         let chanmon_cfgs = create_chanmon_cfgs(2);
1302         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1303         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1304         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1305
1306         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1307
1308         // balancing
1309         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
1310
1311         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);
1312
1313         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], 800_000);
1314         let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
1315         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], 800_000, payment_hash, node_a_payment_secret);
1316
1317         // Provide preimage to node 0 by claiming payment
1318         nodes[0].node.claim_funds(payment_preimage);
1319         expect_payment_claimed!(nodes[0], payment_hash, 800_000);
1320         check_added_monitors!(nodes[0], 1);
1321
1322         // Broadcast node 1 commitment txn
1323         let remote_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
1324
1325         assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound
1326         let mut has_both_htlcs = 0; // check htlcs match ones committed
1327         for outp in remote_txn[0].output.iter() {
1328                 if outp.value == 800_000 / 1000 {
1329                         has_both_htlcs += 1;
1330                 } else if outp.value == 900_000 / 1000 {
1331                         has_both_htlcs += 1;
1332                 }
1333         }
1334         assert_eq!(has_both_htlcs, 2);
1335
1336         mine_transaction(&nodes[0], &remote_txn[0]);
1337         check_added_monitors!(nodes[0], 1);
1338         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
1339         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
1340
1341         let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
1342         assert_eq!(claim_txn.len(), 3);
1343
1344         check_spends!(claim_txn[0], remote_txn[0]); // Immediate HTLC claim with preimage
1345         check_spends!(claim_txn[1], remote_txn[0]);
1346         check_spends!(claim_txn[2], remote_txn[0]);
1347         let preimage_tx = &claim_txn[0];
1348         let (preimage_bump_tx, timeout_tx) = if claim_txn[1].input[0].previous_output == preimage_tx.input[0].previous_output {
1349                 (&claim_txn[1], &claim_txn[2])
1350         } else {
1351                 (&claim_txn[2], &claim_txn[1])
1352         };
1353
1354         assert_eq!(preimage_tx.input.len(), 1);
1355         assert_eq!(preimage_bump_tx.input.len(), 1);
1356
1358         assert_eq!(preimage_tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx
1359         assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value, 800);
1360
1361         assert_eq!(timeout_tx.input.len(), 1);
1362         assert_eq!(timeout_tx.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx
1363         check_spends!(timeout_tx, remote_txn[0]);
1364         assert_eq!(remote_txn[0].output[timeout_tx.input[0].previous_output.vout as usize].value, 900);
1365
1366         let events = nodes[0].node.get_and_clear_pending_msg_events();
1367         assert_eq!(events.len(), 3);
1368         for e in events {
1369                 match e {
1370                         MessageSendEvent::BroadcastChannelUpdate { .. } => {},
1371                         MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::DisconnectPeer { ref msg } } => {
1372                                 assert_eq!(node_id, nodes[1].node.get_our_node_id());
1373                                 assert_eq!(msg.as_ref().unwrap().data, "Channel closed because commitment or closing transaction was confirmed on chain.");
1374                         },
1375                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
1376                                 assert!(update_add_htlcs.is_empty());
1377                                 assert!(update_fail_htlcs.is_empty());
1378                                 assert_eq!(update_fulfill_htlcs.len(), 1);
1379                                 assert!(update_fail_malformed_htlcs.is_empty());
1380                                 assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
1381                         },
1382                         _ => panic!("Unexpected event"),
1383                 }
1384         }
1385 }
1386
1387 #[test]
1388 fn test_basic_channel_reserve() {
1389         let chanmon_cfgs = create_chanmon_cfgs(2);
1390         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1391         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1392         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1393         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1394
1395         let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
1396         let channel_reserve = chan_stat.channel_reserve_msat;
1397
	// The 2* and +1 are for the fee spike reserve: the funder must be able to pay the commitment
	// tx fee at FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE (2x) the current feerate, with one extra
	// buffered HTLC slot.
1399         let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, &get_channel_type_features!(nodes[0], nodes[1], chan.2));
1400         let max_can_send = 5000000 - channel_reserve - commit_tx_fee;
1401         let (mut route, our_payment_hash, _, our_payment_secret) =
1402                 get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
1403         route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
1404         let err = nodes[0].node.send_payment_with_route(&route, our_payment_hash,
1405                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).err().unwrap();
	match err {
		PaymentSendFailure::AllFailedResendSafe(ref fails) => {
			assert!(matches!(fails[0], APIError::ChannelUnavailable { .. }));
		},
		_ => panic!("Unexpected error variant"),
	}
1413         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1414
1415         send_payment(&nodes[0], &vec![&nodes[1]], max_can_send);
1416 }
1417
1418 #[test]
1419 fn test_fee_spike_violation_fails_htlc() {
1420         let chanmon_cfgs = create_chanmon_cfgs(2);
1421         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1422         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1423         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1424         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1425
1426         let (mut route, payment_hash, _, payment_secret) =
1427                 get_route_and_payment_hash!(nodes[0], nodes[1], 3460000);
1428         route.paths[0].hops[0].fee_msat += 1;
1429         // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1430         let secp_ctx = Secp256k1::new();
1431         let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!");
1432
1433         let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
1434
1435         let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
1436         let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret);
1437         let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
1438                 3460001, &recipient_onion_fields, cur_height, &None).unwrap();
1439         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
1440         let msg = msgs::UpdateAddHTLC {
1441                 channel_id: chan.2,
1442                 htlc_id: 0,
1443                 amount_msat: htlc_msat,
1444                 payment_hash: payment_hash,
1445                 cltv_expiry: htlc_cltv,
1446                 onion_routing_packet: onion_packet,
1447                 skimmed_fee_msat: None,
1448                 blinding_point: None,
1449         };
1450
1451         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
1452
1453         // Now manually create the commitment_signed message corresponding to the update_add
1454         // nodes[0] just sent. In the code for construction of this message, "local" refers
1455         // to the sender of the message, and "remote" refers to the receiver.
1456
1457         let feerate_per_kw = get_feerate!(nodes[0], nodes[1], chan.2);
1458
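	// Commitment numbers are 48 bits (BOLT 3); LDK counts them down, numbering the first
	// commitment transaction 2^48 - 1.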
1459         const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
1460
1461         // Get the TestChannelSigner for each channel, which will be used to (1) get the keys
1462         // needed to sign the new commitment tx and (2) sign the new commitment tx.
1463         let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = {
1464                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
1465                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
1466                 let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
1467                         |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
1468                 ).flatten().unwrap();
1469                 let chan_signer = local_chan.get_signer();
1470                 // Make the signer believe we validated another commitment, so we can release the secret
1471                 chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
1472
1473                 let pubkeys = chan_signer.as_ref().pubkeys();
1474                 (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
1475                  chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER),
1476                  chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx),
1477                  chan_signer.as_ref().pubkeys().funding_pubkey)
1478         };
1479         let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
1480                 let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
1481                 let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
1482                 let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
1483                         |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
1484                 ).flatten().unwrap();
1485                 let chan_signer = remote_chan.get_signer();
1486                 let pubkeys = chan_signer.as_ref().pubkeys();
1487                 (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
1488                  chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
1489                  chan_signer.as_ref().pubkeys().funding_pubkey)
1490         };
1491
1492         // Assemble the set of keys we can use for signatures for our commitment_signed message.
1493         let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
1494                 &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);
1495
1496         // Build the remote commitment transaction so we can sign it, and then later use the
1497         // signature for the commitment_signed message.
1498         let local_chan_balance = 1313;
1499
1500         let accepted_htlc_info = chan_utils::HTLCOutputInCommitment {
1501                 offered: false,
1502                 amount_msat: 3460001,
1503                 cltv_expiry: htlc_cltv,
1504                 payment_hash,
1505                 transaction_output_index: Some(1),
1506         };
1507
1508         let commitment_number = INITIAL_COMMITMENT_NUMBER - 1;
1509
1510         let res = {
1511                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
1512                 let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
1513                 let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
1514                         |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
1515                 ).flatten().unwrap();
1516                 let local_chan_signer = local_chan.get_signer();
1517                 let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
1518                         commitment_number,
1519                         95000,
1520                         local_chan_balance,
1521                         local_funding, remote_funding,
1522                         commit_tx_keys.clone(),
1523                         feerate_per_kw,
1524                         &mut vec![(accepted_htlc_info, ())],
1525                         &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
1526                 );
1527                 local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), Vec::new(), &secp_ctx).unwrap()
1528         };
1529
1530         let commit_signed_msg = msgs::CommitmentSigned {
1531                 channel_id: chan.2,
1532                 signature: res.0,
1533                 htlc_signatures: res.1,
1534                 #[cfg(taproot)]
1535                 partial_signature_with_nonce: None,
1536         };
1537
	// Send the commitment_signed message to nodes[1].
1539         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
1540         let _ = nodes[1].node.get_and_clear_pending_msg_events();
1541
1542         // Send the RAA to nodes[1].
1543         let raa_msg = msgs::RevokeAndACK {
1544                 channel_id: chan.2,
1545                 per_commitment_secret: local_secret,
1546                 next_per_commitment_point: next_local_point,
1547                 #[cfg(taproot)]
1548                 next_local_nonce: None,
1549         };
1550         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_msg);
1551
1552         let events = nodes[1].node.get_and_clear_pending_msg_events();
1553         assert_eq!(events.len(), 1);
1554         // Make sure the HTLC failed in the way we expect.
1555         match events[0] {
1556                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, .. } => {
1557                         assert_eq!(update_fail_htlcs.len(), 1);
1558                         update_fail_htlcs[0].clone()
1559                 },
1560                 _ => panic!("Unexpected event"),
1561         };
1562         nodes[1].logger.assert_log("lightning::ln::channel",
1563                 format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1);
1564
1565         check_added_monitors!(nodes[1], 2);
1566 }
1567
1568 #[test]
1569 fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
1570         let mut chanmon_cfgs = create_chanmon_cfgs(2);
	// Set the push amount such that the funder, nodes[0], is left with only its channel
	// reserve plus the commitment fee budget for MIN_AFFORDABLE_HTLC_COUNT HTLCs. Since the
	// funder pays the (growing) commitment fee, the fundee sending any above-dust amount
	// beyond that would put nodes[0] under its reserve. In this test we check that we would
	// be prevented from sending such an HTLC.
1575         let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1576         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1577         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1578         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1579         let default_config = UserConfig::default();
1580         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1581
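	// Push everything to nodes[1] except the commitment fee budget for MIN_AFFORDABLE_HTLC_COUNT
	// HTLCs and (below) the funder's channel reserve.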
1582         let mut push_amt = 100_000_000;
1583         push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1584
1585         push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1586
1587         let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);
1588
	// Fetch a route in advance as we will be unable to fetch one once we're unable to send.
1590         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000);
1591         // Sending exactly enough to hit the reserve amount should be accepted
1592         for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
1593                 route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1594         }
1595
	// However, one more HTLC should put us significantly over the reserve amount and fail.
1597         unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
1598                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1599                 ), true, APIError::ChannelUnavailable { .. }, {});
1600         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1601 }
1602
1603 #[test]
1604 fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
1605         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1606         let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1607         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1608         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1609         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1610         let default_config = UserConfig::default();
1611         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1612
1613         // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
1614         // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
1615         // transaction fee with 0 HTLCs (183 sats)).
1616         let mut push_amt = 100_000_000;
1617         push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1618         push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1619         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);
1620
1621         // Send four HTLCs to cover the initial push_msat buffer we're required to include
1622         for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
1623                 route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1624         }
1625
1626         let (mut route, payment_hash, _, payment_secret) =
1627                 get_route_and_payment_hash!(nodes[1], nodes[0], 1000);
1628         route.paths[0].hops[0].fee_msat = 700_000;
1629         // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1630         let secp_ctx = Secp256k1::new();
1631         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
1632         let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
1633         let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
1634         let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret);
1635         let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
1636                 700_000, &recipient_onion_fields, cur_height, &None).unwrap();
1637         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
1638         let msg = msgs::UpdateAddHTLC {
1639                 channel_id: chan.2,
1640                 htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64,
1641                 amount_msat: htlc_msat,
1642                 payment_hash: payment_hash,
1643                 cltv_expiry: htlc_cltv,
1644                 onion_routing_packet: onion_packet,
1645                 skimmed_fee_msat: None,
1646                 blinding_point: None,
1647         };
1648
1649         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
1650         // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
1651         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value", 3);
1652         assert_eq!(nodes[0].node.list_channels().len(), 0);
1653         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
1654         assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
1655         check_added_monitors!(nodes[0], 1);
1656         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() },
1657                 [nodes[1].node.get_our_node_id()], 100000);
1658 }
1659
1660 #[test]
1661 fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
1662         // Test that if we receive many dust HTLCs over an outbound channel, they don't count when
1663         // calculating our commitment transaction fee (this was previously broken).
1664         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1665         let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1666
1667         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1669         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1670         let default_config = UserConfig::default();
1671         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1672
1673         // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
1674         // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
1675         // transaction fee with 0 HTLCs (183 sats)).
1676         let mut push_amt = 100_000_000;
1677         push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
1678         push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1679         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt);
1680
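	// The largest received-HTLC value that is still dust for nodes[0]: an HTLC counts as dust
	// when its value minus the fee of the HTLC-success transaction claiming it falls below the
	// dust limit.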
1681         let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000
1682                 + feerate_per_kw as u64 * htlc_success_tx_weight(&channel_type_features) / 1000 * 1000 - 1;
1683         // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
1684         // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
1685         // commitment transaction fee.
1686         route_payment(&nodes[1], &[&nodes[0]], dust_amt);
1687
1688         // Send four HTLCs to cover the initial push_msat buffer we're required to include
1689         for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
1690                 route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1691         }
1692
1693         // One more than the dust amt should fail, however.
1694         let (mut route, our_payment_hash, _, our_payment_secret) =
1695                 get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt);
1696         route.paths[0].hops[0].fee_msat += 1;
1697         unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
1698                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
1699                 ), true, APIError::ChannelUnavailable { .. }, {});
1700 }
1701
1702 #[test]
1703 fn test_chan_init_feerate_unaffordability() {
1704         // Test that we will reject channel opens which do not leave enough to pay for any HTLCs due to
1705         // channel reserve and feerate requirements.
1706         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1707         let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
1708         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1709         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1710         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1711         let default_config = UserConfig::default();
1712         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
1713
1714         // Set the push_msat amount such that nodes[0] will not be able to afford to add even a single
1715         // HTLC.
1716         let mut push_amt = 100_000_000;
1717         push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
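	// At the test harness's default feerate of 253 sat/kW, a static_remote_key commitment
	// transaction with MIN_AFFORDABLE_HTLC_COUNT (4) HTLCs weighs 724 + 4 * 172 = 1412 WU, i.e.
	// a 357 sat fee; pushing one msat more than push_amt leaves nodes[0] only 356 sats.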
1718         assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None, None).unwrap_err(),
1719                 APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() });
1720
1721         // During open, we don't have a "counterparty channel reserve" to check against, so that
1722         // requirement only comes into play on the open_channel handling side.
1723         push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
1724         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None, None).unwrap();
1725         let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
1726         open_channel_msg.push_msat += 1;
1727         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
1728
1729         let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
1730         assert_eq!(msg_events.len(), 1);
1731         match msg_events[0] {
1732                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
1733                         assert_eq!(msg.data, "Insufficient funding amount for initial reserve");
1734                 },
1735                 _ => panic!("Unexpected event"),
1736         }
1737 }
1738
1739 #[test]
1740 fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
1741         // Test that if we receive many dust HTLCs over an inbound channel, they don't count when
1742         // calculating our counterparty's commitment transaction fee (this was previously broken).
1743         let chanmon_cfgs = create_chanmon_cfgs(2);
1744         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1746         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1747         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000);
1748
1749         let payment_amt = 46000; // Dust amount
1750         // In the previous code, these first four payments would succeed.
1751         route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1752         route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1753         route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1754         route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1755
1756         // Then these next 5 would be interpreted by nodes[1] as violating the fee spike buffer.
1757         route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1758         route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1759         route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1760         route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1761         route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1762
1763         // And this last payment previously resulted in nodes[1] closing on its inbound-channel
1764         // counterparty, because it counted all the previous dust HTLCs against nodes[0]'s commitment
1765         // transaction fee and therefore perceived this next payment as a channel reserve violation.
1766         route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1767 }
1768
1769 #[test]
1770 fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
1771         let chanmon_cfgs = create_chanmon_cfgs(3);
1772         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1773         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1774         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1775         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
1776         let _ = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);
1777
1778         let feemsat = 239;
1779         let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
1780         let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
1781         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
1782         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
1783
	// The 2* and +1 are for the fee spike reserve.
1785         let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
1786         let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc)/2;
1787         let amt_msat_1 = recv_value_1 + total_routing_fee_msat;
1788
1789         // Add a pending HTLC.
1790         let (route_1, our_payment_hash_1, _, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_1);
1791         let payment_event_1 = {
1792                 nodes[0].node.send_payment_with_route(&route_1, our_payment_hash_1,
1793                         RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
1794                 check_added_monitors!(nodes[0], 1);
1795
1796                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1797                 assert_eq!(events.len(), 1);
1798                 SendEvent::from_event(events.remove(0))
1799         };
1800         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
1801
1802         // Attempt to trigger a channel reserve violation --> payment failure.
1803         let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, &channel_type_features);
1804         let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1;
1805         let amt_msat_2 = recv_value_2 + total_routing_fee_msat;
1806         let mut route_2 = route_1.clone();
1807         route_2.paths[0].hops.last_mut().unwrap().fee_msat = amt_msat_2;
1808
1809         // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1810         let secp_ctx = Secp256k1::new();
1811         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
1812         let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
1813         let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap();
1814         let recipient_onion_fields = RecipientOnionFields::spontaneous_empty();
1815         let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
1816                 &route_2.paths[0], recv_value_2, &recipient_onion_fields, cur_height, &None).unwrap();
1817         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1).unwrap();
1818         let msg = msgs::UpdateAddHTLC {
1819                 channel_id: chan.2,
1820                 htlc_id: 1,
		amount_msat: htlc_msat + 1,
		payment_hash: our_payment_hash_1,
		cltv_expiry: htlc_cltv,
		onion_routing_packet: onion_packet,
		skimmed_fee_msat: None,
		blinding_point: None,
	};

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
	// Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
	nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value", 3);
	assert_eq!(nodes[1].node.list_channels().len(), 1);
	let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
	assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() },
		[nodes[0].node.get_our_node_id()], 100000);
}

#[test]
fn test_inbound_outbound_capacity_is_not_zero() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
	let channels0 = node_chanmgrs[0].list_channels();
	let channels1 = node_chanmgrs[1].list_channels();
	let default_config = UserConfig::default();
	assert_eq!(channels0.len(), 1);
	assert_eq!(channels1.len(), 1);

	let reserve = get_holder_selected_channel_reserve_satoshis(100_000, &default_config);
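	// 95_000_000 msat were pushed to nodes[1] at open, so each side's spendable capacity is its
	// current balance less the reserve it must maintain.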
	assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve*1000);
	assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve*1000);

	assert_eq!(channels0[0].outbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
	assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
}

fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, channel_type_features: &ChannelTypeFeatures) -> u64 {
	(commitment_tx_base_weight(channel_type_features) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
}
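
// Illustrative sanity sketch for the helper above: it computes (base weight + per-HTLC weight)
// * feerate and rounds down to whole satoshis, so its result is always a multiple of 1_000 msat
// and never shrinks as the HTLC count grows.
#[test]
fn commit_tx_fee_msat_sketch() {
	let features = ChannelTypeFeatures::only_static_remote_key();
	for feerate in [253u32, 1_000, 5_000] {
		let mut prev_fee = 0;
		for num_htlcs in 0..5 {
			let fee = commit_tx_fee_msat(feerate, num_htlcs, &features);
			assert_eq!(fee % 1_000, 0); // whole satoshis, expressed in msat
			assert!(fee >= prev_fee); // monotone in the number of HTLCs
			prev_fee = fee;
		}
	}
}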

#[test]
fn test_channel_reserve_holding_cell_htlcs() {
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	// When this test was written, the default base fee floated based on the HTLC count.
	// It is now fixed, so we simply set the fee to the expected value here.
	let mut config = test_default_channel_config();
	config.channel_config.forwarding_fee_base_msat = 239;
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001);
	let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 190000, 1001);

	let mut stat01 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
	let mut stat11 = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);

	let mut stat12 = get_channel_value_stat!(nodes[1], nodes[2], chan_2.2);
	let mut stat22 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2);

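	// Pops the single pending message event off $node and returns it as a SendEvent, asserting
	// that exactly one event and one monitor update were generated.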
	macro_rules! expect_forward {
		($node: expr) => {{
			let mut events = $node.node.get_and_clear_pending_msg_events();
			assert_eq!(events.len(), 1);
			check_added_monitors!($node, 1);
			let payment_event = SendEvent::from_event(events.remove(0));
			payment_event
		}}
	}

	let feemsat = 239; // set above
	let total_fee_msat = (nodes.len() - 2) as u64 * feemsat;
	let feerate = get_feerate!(nodes[0], nodes[1], chan_1.2);
	let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_1.2);

	let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat;

	// attempt to send amt_msat > their_max_htlc_value_in_flight_msat
	{
		let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
			.with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
		let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0);
		route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
		assert!(route.paths[0].hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));

		unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
				RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
			), true, APIError::ChannelUnavailable { .. }, {});
		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	}

	// The channel reserve is bigger than their_max_htlc_value_in_flight_msat, so loop to deplete
	// nodes[0]'s balance
	loop {
		let amt_msat = recv_value_0 + total_fee_msat;
		// 3 for the 3 HTLCs that will be sent, 2* and +1 for the fee spike reserve.
		// Also, ensure that each payment has enough to be over the dust limit to
		// ensure it'll be included in each commit tx fee calculation.
		let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
		let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000);
		if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat {
			break;
		}

		let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
			.with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
		let route = get_route!(nodes[0], payment_params, recv_value_0).unwrap();
		let (payment_preimage, ..) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0);
		claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);

		let (stat01_, stat11_, stat12_, stat22_) = (
			get_channel_value_stat!(nodes[0], nodes[1], chan_1.2),
			get_channel_value_stat!(nodes[1], nodes[0], chan_1.2),
			get_channel_value_stat!(nodes[1], nodes[2], chan_2.2),
			get_channel_value_stat!(nodes[2], nodes[1], chan_2.2),
		);

		assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
		assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
		assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
		assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
		stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
	}

	// Add a pending output.
	// 2* and +1 HTLCs on the commit tx fee for the fee spike reserve.
	// The reason we're dividing by two here is as follows: the dividend is the total outbound
	// liquidity after fees, the channel reserve, and the fee spike buffer are removed. We
	// eventually want to divide this quantity into 3 portions, each of which will be sent in an
	// HTLC. This lets us test channel reserve policy at the edges of what amount is sendable,
	// i.e. cases where 1 msat over X will cause a payment failure, but anything less than that
	// can be sent successfully. So, dividing by two is a somewhat arbitrary way of getting the
	// amount of the first of these 3 payments. The reason we split into 3 payments is to test
	// the behavior of the holding cell with respect to channel reserve and commit tx fee policy.
	let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
	let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2;
	let amt_msat_1 = recv_value_1 + total_fee_msat;
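	// Sanity check (illustrative): two payments of recv_value_1, together with the routing fee
	// and fee spike buffer carved out above, must still leave nodes[0] at or above its reserve.
	assert!(2 * recv_value_1 + total_fee_msat + commit_tx_fee_2_htlcs
		<= stat01.value_to_self_msat - stat01.channel_reserve_msat);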

	let (route_1, our_payment_hash_1, our_payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_1);
	let payment_event_1 = {
		nodes[0].node.send_payment_with_route(&route_1, our_payment_hash_1,
			RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
		check_added_monitors!(nodes[0], 1);

		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		SendEvent::from_event(events.remove(0))
	};
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);

	// channel reserve test with htlc pending output > 0
	let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs;
	{
		let mut route = route_1.clone();
		route.paths[0].hops.last_mut().unwrap().fee_msat = recv_value_2 + 1;
		let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
		unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
				RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
			), true, APIError::ChannelUnavailable { .. }, {});
		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	}

	// split the rest to test holding cell
	let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
	let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs;
	let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2;
	let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat;
	{
		let stat = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
		assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat + commit_tx_fee_3_htlcs), stat.channel_reserve_msat);
	}

	// now see if they go through on both sides
	let (route_21, our_payment_hash_21, our_payment_preimage_21, our_payment_secret_21) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_21);
	// but this one will get stuck in the holding cell
	nodes[0].node.send_payment_with_route(&route_21, our_payment_hash_21,
		RecipientOnionFields::secret_only(our_payment_secret_21), PaymentId(our_payment_hash_21.0)).unwrap();
	check_added_monitors!(nodes[0], 0);
	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 0);

	// test with outbound holding cell amount > 0
	{
		let (mut route, our_payment_hash, _, our_payment_secret) =
			get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
		route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
		unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
				RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
			), true, APIError::ChannelUnavailable { .. }, {});
		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	}

	let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
	// this one will also get stuck in the holding cell
	nodes[0].node.send_payment_with_route(&route_22, our_payment_hash_22,
		RecipientOnionFields::secret_only(our_payment_secret_22), PaymentId(our_payment_hash_22.0)).unwrap();
	check_added_monitors!(nodes[0], 0);
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// flush the pending htlc
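	// What follows is the standard commitment dance: nodes[1] responds to the commitment_signed
	// with a revoke_and_ack plus its own commitment_signed, and once nodes[0] processes that
	// revoke_and_ack its holding cell is freed, producing commitment_update_2 with the two
	// queued HTLCs.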
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg);
	let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	// the pending htlc should be promoted to committed
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
	check_added_monitors!(nodes[0], 1);
	let commitment_update_2 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed);
	let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	expect_pending_htlcs_forwardable!(nodes[1]);

	let payment_event_11 = &expect_forward!(nodes[1]);
	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]);
	commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);

	expect_pending_htlcs_forwardable!(nodes[2]);
	expect_payment_claimable!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1);

	// flush the htlcs in the holding cell
	assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]);
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]);
	commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
	expect_pending_htlcs_forwardable!(nodes[1]);

	let payment_event_3 = &expect_forward!(nodes[1]);
	assert_eq!(payment_event_3.msgs.len(), 2);
	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]);
	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]);

	commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
	expect_pending_htlcs_forwardable!(nodes[2]);

	let events = nodes[2].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 2);
	match events[0] {
		Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
			assert_eq!(our_payment_hash_21, *payment_hash);
			assert_eq!(recv_value_21, amount_msat);
			assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
			assert_eq!(via_channel_id, Some(chan_2.2));
			match &purpose {
				PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(our_payment_secret_21, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	}
	match events[1] {
		Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
			assert_eq!(our_payment_hash_22, *payment_hash);
			assert_eq!(recv_value_22, amount_msat);
			assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
			assert_eq!(via_channel_id, Some(chan_2.2));
			match &purpose {
				PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(our_payment_secret_22, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
	claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
	claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);

	let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, &channel_type_features);
	let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat;
	send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3);

	let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
	let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat);
	let stat0 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
	assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
	assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat + commit_tx_fee_1_htlc);

	let stat2 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2);
	assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3);
}
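
// A minimal illustrative model (these are not LDK's real types) of the holding-cell behavior
// the test above exercises: while a channel is awaiting a counterparty revoke_and_ack,
// newly-sent HTLCs are queued rather than signed immediately, and are all released together in
// one commitment update once the RAA arrives.
#[test]
fn holding_cell_model_sketch() {
	struct Cell { awaiting_raa: bool, queued: Vec<u64> }
	impl Cell {
		// Returns true if the HTLC can go straight into a new commitment transaction.
		fn send_htlc(&mut self, amt_msat: u64) -> bool {
			if self.awaiting_raa { self.queued.push(amt_msat); false } else { true }
		}
		// Receiving the RAA unblocks the channel and frees everything queued.
		fn on_revoke_and_ack(&mut self) -> Vec<u64> {
			self.awaiting_raa = false;
			core::mem::take(&mut self.queued)
		}
	}
	let mut cell = Cell { awaiting_raa: true, queued: Vec::new() };
	assert!(!cell.send_htlc(21)); // queued, not sent: still awaiting an RAA
	assert!(!cell.send_htlc(22)); // also queued
	assert_eq!(cell.on_revoke_and_ack(), vec![21, 22]); // freed in one update
	assert!(cell.send_htlc(23)); // no longer blocked
}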

#[test]
fn channel_reserve_in_flight_removes() {
	// In cases where one side claims an HTLC, it thinks it has additional available funds that it
	// can send to its counterparty, but due to update ordering, the other side may not yet have
	// considered those HTLCs fully removed.
	// This tests that we don't count HTLCs which will not be included in the next remote
	// commitment transaction towards the reserve value (as it implies no commitment transaction
	// will be generated which violates the remote reserve value).
	// This was broken previously, and discovered by the chanmon_fail_consistency fuzz test.
	// To test this we:
	//  * route two HTLCs from A to B (note that, at a high level, this test is checking that, when
	//    you consider the values of both of these HTLCs, B may not send an HTLC back to A, but if
	//    you only consider the value of the first HTLC, it may),
	//  * start routing a third HTLC from A to B,
	//  * claim the first two HTLCs (though B will generate an update_fulfill for one, and put
	//    the other claim in its holding cell, as it immediately goes into AwaitingRAA),
	//  * deliver the first fulfill from B,
	//  * deliver the update_add and an RAA from A, resulting in B freeing the second holding cell
	//    claim,
	//  * deliver A's response CS and RAA.
	//    This results in A having the second HTLC in AwaitingRemovedRemoteRevoke, but B having
	//    removed it fully. B now has the push_msat plus the first two HTLCs in value.
	//  * Now B happily sends another HTLC, potentially violating its reserve value from A's point
	//    of view (if A counts the AwaitingRemovedRemoteRevoke HTLC).
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);

	let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
	// Route the first two HTLCs.
	let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000;
	let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1);
	let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 20_000);

	// Start routing the third HTLC (this is just used to get everyone in the right state).
	let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
	let send_1 = {
		nodes[0].node.send_payment_with_route(&route, payment_hash_3,
			RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		SendEvent::from_event(events.remove(0))
	};

	// Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an
	// initial fulfill/CS.
	nodes[1].node.claim_funds(payment_preimage_1);
	expect_payment_claimed!(nodes[1], payment_hash_1, payment_value_1);
	check_added_monitors!(nodes[1], 1);
	let bs_removes = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	// This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not
	// remove the second HTLC when we send the HTLC back from B to A.
	nodes[1].node.claim_funds(payment_preimage_2);
	expect_payment_claimed!(nodes[1], payment_hash_2, 20_000);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg);
	check_added_monitors!(nodes[1], 1);
	// B is already AwaitingRAA, so it can't generate a CS here
	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[1], 1);
	let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
	check_added_monitors!(nodes[0], 1);
	let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

	// The second HTLC is removed, but as A is in AwaitingRAA it can't generate a CS here, so the
	// RAA that B generated above doesn't fully resolve the second HTLC from A's point of view.
	// However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
	// can no longer broadcast a commitment transaction with it and B has the preimage so can go
	// on-chain as necessary).
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 100000);

	// Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
	// resolve the second HTLC from A's point of view.
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
	check_added_monitors!(nodes[0], 1);
	expect_payment_path_successful!(nodes[0]);
	let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	// Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
	// to A to ensure that A doesn't count the almost-removed HTLC in update_add processing.
	let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[1], nodes[0], 10000);
	let send_2 = {
		nodes[1].node.send_payment_with_route(&route, payment_hash_4,
			RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap();
		check_added_monitors!(nodes[1], 1);
		let mut events = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		SendEvent::from_event(events.remove(0))
	};

	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_2.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_2.commitment_msg);
	check_added_monitors!(nodes[0], 1);
	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	// Now just resolve all the outstanding messages/HTLCs for completeness...

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
	check_added_monitors!(nodes[0], 1);
	expect_payment_path_successful!(nodes[0]);
	let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
	check_added_monitors!(nodes[0], 1);

	expect_pending_htlcs_forwardable!(nodes[0]);
	expect_payment_claimable!(nodes[0], payment_hash_4, payment_secret_4, 10000);

	claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
}

#[test]
fn channel_monitor_network_test() {
	// Simple test which builds a network of ChannelManagers, connects them to each other, and
	// tests that ChannelMonitor is able to recover from various states.
	let chanmon_cfgs = create_chanmon_cfgs(5);
	let node_cfgs = create_node_cfgs(5, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, None, None, None, None]);
	let nodes = create_network(5, &node_cfgs, &node_chanmgrs);

	// Create some initial channels
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
	let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
	let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4);

	// Make sure all nodes are at the same starting height
	connect_blocks(&nodes[0], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
	connect_blocks(&nodes[1], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
	connect_blocks(&nodes[2], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
	connect_blocks(&nodes[3], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[3].best_block_info().1);
	connect_blocks(&nodes[4], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[4].best_block_info().1);

	// Rebalance the network a bit by relaying one payment through all the channels...
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);

	// Simple case with no pending HTLCs:
	nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
	check_added_monitors!(nodes[1], 1);
	check_closed_broadcast!(nodes[1], true);
	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
	{
		let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
		assert_eq!(node_txn.len(), 1);
		mine_transaction(&nodes[1], &node_txn[0]);
		if nodes[1].connect_style.borrow().updates_best_block_first() {
			let _ = nodes[1].tx_broadcaster.txn_broadcast();
		}

		mine_transaction(&nodes[0], &node_txn[0]);
		check_added_monitors!(nodes[0], 1);
		test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE);
	}
	check_closed_broadcast!(nodes[0], true);
	assert_eq!(nodes[0].node.list_channels().len(), 0);
	assert_eq!(nodes[1].node.list_channels().len(), 1);
	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);

	// One pending HTLC is discarded by the force-close:
	let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);

	// Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
	// broadcasted until we reach the timelock time).
	nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
	check_closed_broadcast!(nodes[1], true);
	check_added_monitors!(nodes[1], 1);
	{
		let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE);
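		// Advance past the HTLC's CLTV expiry (final CLTV plus one routing hop's delta) and the
		// grace period, so nodes[1]'s monitor is willing to broadcast its HTLC-Timeout claim.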
		connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
		test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
		mine_transaction(&nodes[2], &node_txn[0]);
		check_added_monitors!(nodes[2], 1);
		test_txn_broadcast(&nodes[2], &chan_2, Some(node_txn[0].clone()), HTLCType::NONE);
	}
	check_closed_broadcast!(nodes[2], true);
	assert_eq!(nodes[1].node.list_channels().len(), 0);
	assert_eq!(nodes[2].node.list_channels().len(), 1);
	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);

	macro_rules! claim_funds {
		($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => {
			{
				$node.node.claim_funds($preimage);
				expect_payment_claimed!($node, $payment_hash, 3_000_000);
				check_added_monitors!($node, 1);

				let events = $node.node.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				match events[0] {
					MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => {
						assert!(update_add_htlcs.is_empty());
						assert!(update_fail_htlcs.is_empty());
						assert_eq!(*node_id, $prev_node.node.get_our_node_id());
					},
					_ => panic!("Unexpected event"),
				};
			}
		}
	}

	// nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
	// HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
	nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
	check_added_monitors!(nodes[2], 1);
	check_closed_broadcast!(nodes[2], true);
	let node2_commitment_txid;
	{
		let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
		connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
		test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
		node2_commitment_txid = node_txn[0].txid();

		// Claim the payment on nodes[3], giving it knowledge of the preimage
		claim_funds!(nodes[3], nodes[2], payment_preimage_1, payment_hash_1);
		mine_transaction(&nodes[3], &node_txn[0]);
		check_added_monitors!(nodes[3], 1);
		check_preimage_claim(&nodes[3], &node_txn);
	}
	check_closed_broadcast!(nodes[3], true);
	assert_eq!(nodes[2].node.list_channels().len(), 0);
	assert_eq!(nodes[3].node.list_channels().len(), 1);
	check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);

	// Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
	// confusing us in the following tests.
	let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&OutPoint { txid: chan_3.3.txid(), index: 0 });

	// One pending HTLC to time out:
	let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[3], &[&nodes[4]], 3_000_000);
	// CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
	// buffer space).

	let (close_chan_update_1, close_chan_update_2) = {
		connect_blocks(&nodes[3], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
		let events = nodes[3].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 2);
		let close_chan_update_1 = match events[1] {
			MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
				msg.clone()
			},
			_ => panic!("Unexpected event"),
		};
		match events[0] {
			MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
				assert_eq!(node_id, nodes[4].node.get_our_node_id());
			},
			_ => panic!("Unexpected event"),
		}
		check_added_monitors!(nodes[3], 1);

		// Clear the bumped claiming txn spending node 2's commitment tx; bumped txn are
		// regenerated after a height-based timer fires.
		{
			let mut node_txn = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap();
			node_txn.retain(|tx| tx.input[0].previous_output.txid != node2_commitment_txid);
		}

		let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT);

		// Claim the payment on nodes[4], giving it knowledge of the preimage
		claim_funds!(nodes[4], nodes[3], payment_preimage_2, payment_hash_2);

		connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
		let events = nodes[4].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 2);
		let close_chan_update_2 = match events[1] {
			MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
				msg.clone()
			},
			_ => panic!("Unexpected event"),
		};
		match events[0] {
			MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
				assert_eq!(node_id, nodes[3].node.get_our_node_id());
			},
			_ => panic!("Unexpected event"),
		}
		check_added_monitors!(nodes[4], 1);
		test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
		check_closed_event!(nodes[4], 1, ClosureReason::HTLCsTimedOut, [nodes[3].node.get_our_node_id()], 100000);

		mine_transaction(&nodes[4], &node_txn[0]);
		check_preimage_claim(&nodes[4], &node_txn);
		(close_chan_update_1, close_chan_update_2)
	};
	nodes[3].gossip_sync.handle_channel_update(&close_chan_update_2).unwrap();
	nodes[4].gossip_sync.handle_channel_update(&close_chan_update_1).unwrap();
	assert_eq!(nodes[3].node.list_channels().len(), 0);
	assert_eq!(nodes[4].node.list_channels().len(), 0);

	assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
		Ok(ChannelMonitorUpdateStatus::Completed));
	check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [nodes[4].node.get_our_node_id()], 100000);
}

#[test]
fn test_justice_tx_htlc_timeout() {
	// Test justice txn built on revoked HTLC-Timeout tx, against both sides
	let mut alice_config = test_default_channel_config();
	alice_config.channel_handshake_config.announced_channel = true;
	alice_config.channel_handshake_limits.force_announced_channel_preference = false;
	alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
	let mut bob_config = test_default_channel_config();
	bob_config.channel_handshake_config.announced_channel = true;
	bob_config.channel_handshake_limits.force_announced_channel_preference = false;
	bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
	let user_cfgs = [Some(alice_config), Some(bob_config)];
	let mut chanmon_cfgs = create_chanmon_cfgs(2);
	chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
	chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	// Create some new channels:
	let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1);

	// A pending HTLC which will be revoked:
	let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
	// Get the will-be-revoked local txn from nodes[0]
	let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_5.2);
	assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
	assert_eq!(revoked_local_txn[0].input.len(), 1);
	assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
	assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to 0 are present
	assert_eq!(revoked_local_txn[1].input.len(), 1);
	assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
	assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
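	// (Test helpers tell HTLC-Timeout spends apart from HTLC-Success spends by this fixed
	// witness-script length.)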
	// Revoke the old state
	claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);

	{
		mine_transaction(&nodes[1], &revoked_local_txn[0]);
		{
			let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
			assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
			assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output
			check_spends!(node_txn[0], revoked_local_txn[0]);
			node_txn.swap_remove(0);
		}
		check_added_monitors!(nodes[1], 1);
		check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
		test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE);

		mine_transaction(&nodes[0], &revoked_local_txn[0]);
		connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
		// Verify broadcast of revoked HTLC-timeout
		let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
		check_added_monitors!(nodes[0], 1);
		check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
		// Broadcast revoked HTLC-timeout on node 1
		mine_transaction(&nodes[1], &node_txn[1]);
		test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
	}
	get_announce_close_broadcast_events(&nodes, 0, 1);
	assert_eq!(nodes[0].node.list_channels().len(), 0);
	assert_eq!(nodes[1].node.list_channels().len(), 0);
}

#[test]
fn test_justice_tx_htlc_success() {
	// Test justice txn built on revoked HTLC-Success tx, against both sides
	let mut alice_config = test_default_channel_config();
	alice_config.channel_handshake_config.announced_channel = true;
	alice_config.channel_handshake_limits.force_announced_channel_preference = false;
	alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
	let mut bob_config = test_default_channel_config();
	bob_config.channel_handshake_config.announced_channel = true;
	bob_config.channel_handshake_limits.force_announced_channel_preference = false;
	bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
	let user_cfgs = [Some(alice_config), Some(bob_config)];
	let mut chanmon_cfgs = create_chanmon_cfgs(2);
	chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
	chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	// Create some new channels:
	let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1);

	// A pending HTLC which will be revoked:
	let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
	// Get the will-be-revoked local txn from B
	let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_6.2);
	assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx
	assert_eq!(revoked_local_txn[0].input.len(), 1);
	assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.txid());
	assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to A are present
	// Revoke the old state
	claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4);
	{
		mine_transaction(&nodes[0], &revoked_local_txn[0]);
		{
			let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
			assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
			assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output

			check_spends!(node_txn[0], revoked_local_txn[0]);
			node_txn.swap_remove(0);
		}
		check_added_monitors!(nodes[0], 1);
		test_txn_broadcast(&nodes[0], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::NONE);

		mine_transaction(&nodes[1], &revoked_local_txn[0]);
		check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
		let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
		check_added_monitors!(nodes[1], 1);
		mine_transaction(&nodes[0], &node_txn[1]);
		check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
		test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
	}
	get_announce_close_broadcast_events(&nodes, 0, 1);
	assert_eq!(nodes[0].node.list_channels().len(), 0);
	assert_eq!(nodes[1].node.list_channels().len(), 0);
}

#[test]
fn revoked_output_claim() {
	// Simple test to ensure a node will claim a revoked output when a stale remote commitment
	// transaction is broadcast by its counterparty
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
	// nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim the revoked output
	let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
	assert_eq!(revoked_local_txn.len(), 1);
	// Only output is the full channel value back to nodes[0]:
	assert_eq!(revoked_local_txn[0].output.len(), 1);
	// Send a payment through, updating everyone's latest commitment txn
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);

	// Inform nodes[1] that nodes[0] broadcast a stale tx
	mine_transaction(&nodes[1], &revoked_local_txn[0]);
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
	assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output

	check_spends!(node_txn[0], revoked_local_txn[0]);

	// Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
	mine_transaction(&nodes[0], &revoked_local_txn[0]);
	get_announce_close_broadcast_events(&nodes, 0, 1);
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
}

#[test]
fn test_forming_justice_tx_from_monitor_updates() {
	do_test_forming_justice_tx_from_monitor_updates(true);
	do_test_forming_justice_tx_from_monitor_updates(false);
}

fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment: bool) {
	// Simple test to make sure that the justice tx formed in WatchtowerPersister
	// is properly formed and can be broadcasted/confirmed successfully in the event
	// that a revoked commitment transaction is broadcasted
	// (Similar to `revoked_output_claim` test but we get the justice tx + broadcast manually)
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let destination_script0 = chanmon_cfgs[0].keys_manager.get_destination_script([0; 32]).unwrap();
	let destination_script1 = chanmon_cfgs[1].keys_manager.get_destination_script([0; 32]).unwrap();
	let persisters = vec![WatchtowerPersister::new(destination_script0),
		WatchtowerPersister::new(destination_script1)];
	let node_cfgs = create_node_cfgs_with_persisters(2, &chanmon_cfgs, persisters.iter().collect());
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
	let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };

	if !broadcast_initial_commitment {
		// Send a payment to move the channel forward
		send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000);
	}

	// nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim the revoked
	// output. We'll keep this commitment transaction to broadcast once it's revoked.
	let revoked_local_txn = get_local_commitment_txn!(nodes[0], channel_id);
	assert_eq!(revoked_local_txn.len(), 1);
	let revoked_commitment_tx = &revoked_local_txn[0];

	// Send another payment, now revoking the previous commitment tx
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000);

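	// The WatchtowerPersister test util built and cached a signed justice tx for the revoked
	// state purely from the ChannelMonitor updates it was handed at persist time.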
2657         let justice_tx = persisters[1].justice_tx(funding_txo, &revoked_commitment_tx.txid()).unwrap();
2658         check_spends!(justice_tx, revoked_commitment_tx);
2659
2660         mine_transactions(&nodes[1], &[revoked_commitment_tx, &justice_tx]);
2661         mine_transactions(&nodes[0], &[revoked_commitment_tx, &justice_tx]);
2662
2663         check_added_monitors!(nodes[1], 1);
2664         check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
2665                 &[nodes[0].node.get_our_node_id()], 100_000);
2666         get_announce_close_broadcast_events(&nodes, 1, 0);
2667
2668         check_added_monitors!(nodes[0], 1);
2669         check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false,
2670                 &[nodes[1].node.get_our_node_id()], 100_000);
2671
2672         // Check that the justice tx has sent the revoked output value to nodes[1]
2673         let monitor = get_monitor!(nodes[1], channel_id);
2674         let total_claimable_balance = monitor.get_claimable_balances().iter().fold(0, |sum, balance| {
2675                 match balance {
2676                         channelmonitor::Balance::ClaimableAwaitingConfirmations { amount_satoshis, .. } => sum + amount_satoshis,
2677                         _ => panic!("Unexpected balance type"),
2678                 }
2679         });
2680         // On the first commitment, nodes[1]'s balance was below dust so it didn't have an output
2681         let node1_channel_balance = if broadcast_initial_commitment { 0 } else { revoked_commitment_tx.output[0].value };
2682         let expected_claimable_balance = node1_channel_balance + justice_tx.output[0].value;
2683         assert_eq!(total_claimable_balance, expected_claimable_balance);
2684 }
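
// The fold above panics on any balance variant other than `ClaimableAwaitingConfirmations`.
// A minimal illustrative sketch (not upstream code; it assumes only the `Balance` enum used
// above) of a more tolerant accumulator that ignores other variants instead:
#[cfg(test)]
fn sum_awaiting_confirmations(balances: &[channelmonitor::Balance]) -> u64 {
	balances.iter().map(|balance| match balance {
		// Only count funds that are waiting out their confirmation delay.
		channelmonitor::Balance::ClaimableAwaitingConfirmations { amount_satoshis, .. } => *amount_satoshis,
		_ => 0,
	}).sum()
}
// Usage would mirror the test above: `sum_awaiting_confirmations(&monitor.get_claimable_balances())`.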
2685
2686
2687 #[test]
2688 fn claim_htlc_outputs_shared_tx() {
2689         // The node revoked an old state and the HTLCs haven't timed out yet, so claim them in a shared justice tx
2690         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2691         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2692         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2693         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2694         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2695
2696         // Create some new channel:
2697         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2698
2699         // Rebalance the network to generate HTLCs in both directions
2700         send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
2701         // nodes[0] is going to revoke an old state, so nodes[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx
2702         let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
2703         let (_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
2704
2705         // Get the will-be-revoked local txn from node[0]
2706         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2707         assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
2708         assert_eq!(revoked_local_txn[0].input.len(), 1);
2709         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
2710         assert_eq!(revoked_local_txn[1].input.len(), 1);
2711         assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2712         assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
2713         check_spends!(revoked_local_txn[1], revoked_local_txn[0]);
2714
2715         // Revoke the old state
2716         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
2717
2718         {
2719                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2720                 check_added_monitors!(nodes[0], 1);
2721                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2722                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2723                 check_added_monitors!(nodes[1], 1);
2724                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2725                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2726                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
2727
2728                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
2729                 assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx
2730
2731                 assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
2732                 check_spends!(node_txn[0], revoked_local_txn[0]);
2733
2734                 let mut witness_lens = BTreeSet::new();
2735                 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
2736                 witness_lens.insert(node_txn[0].input[1].witness.last().unwrap().len());
2737                 witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len());
2738                 assert_eq!(witness_lens.len(), 3);
2739                 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2740                 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2741                 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2742
2743                 // Finally, mine the penalty transaction and check that we get an HTLC failure after
2744                 // ANTI_REORG_DELAY confirmations.
2745                 mine_transaction(&nodes[1], &node_txn[0]);
2746                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2747                 expect_payment_failed!(nodes[1], payment_hash_2, false);
2748         }
2749         get_announce_close_broadcast_events(&nodes, 0, 1);
2750         assert_eq!(nodes[0].node.list_channels().len(), 0);
2751         assert_eq!(nodes[1].node.list_channels().len(), 0);
2752 }
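
// Hedged sketch (not upstream code): the witness-length assertions above work because the last
// witness element is the revealed witness script, whose length identifies which output a
// justice input claims. The 77-byte figure is the revokeable `to_local` script under these
// test parameters; the HTLC script lengths are the `chan_utils` constants. A helper capturing
// that classification might look like:
#[cfg(test)]
fn classify_justice_input(input: &TxIn) -> &'static str {
	match input.witness.last().unwrap().len() {
		77 => "revoked to_local output",
		OFFERED_HTLC_SCRIPT_WEIGHT => "revoked offered HTLC output",
		ACCEPTED_HTLC_SCRIPT_WEIGHT => "revoked received HTLC output",
		_ => "unknown claim type",
	}
}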
2753
2754 #[test]
2755 fn claim_htlc_outputs_single_tx() {
2756         // The node revoked an old state and the HTLCs have timed out, so claim each of them in a separate justice tx
2757         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2758         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2759         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2760         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2761         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2762
2763         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2764
2765         // Rebalance the network to generate HTLCs in both directions
2766         send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
2767         // nodes[0] is going to revoke an old state, so nodes[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx, but this
2768         // time as two separate claim transactions, since we're going to time out the HTLCs given a high current height
2769         let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
2770         let (_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
2771
2772         // Get the will-be-revoked local txn from node[0]
2773         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2774
2775         // Revoke the old state
2776         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
2777
2778         {
2779                 confirm_transaction_at(&nodes[0], &revoked_local_txn[0], 100);
2780                 check_added_monitors!(nodes[0], 1);
2781                 confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
2782                 check_added_monitors!(nodes[1], 1);
2783                 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
2784                 let mut events = nodes[0].node.get_and_clear_pending_events();
2785                 expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
2786                 match events.last().unwrap() {
2787                         Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
2788                         _ => panic!("Unexpected event"),
2789                 }
2790
2791                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2792                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
2793
2794                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcast();
2795
2796                 // Check the pair of local commitment and HTLC-timeout transactions broadcast due to HTLC expiration
2797                 assert_eq!(node_txn[0].input.len(), 1);
2798                 check_spends!(node_txn[0], chan_1.3);
2799                 assert_eq!(node_txn[1].input.len(), 1);
2800                 let witness_script = node_txn[1].input[0].witness.last().unwrap();
2801                 assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); //Spending an offered htlc output
2802                 check_spends!(node_txn[1], node_txn[0]);
2803
2804                 // Filter out any non-justice transactions.
2805                 node_txn.retain(|tx| tx.input[0].previous_output.txid == revoked_local_txn[0].txid());
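                // Hedged note: more than three transactions may remain because the ChannelMonitor
                // rebroadcasts fee-bumped replacements of the same claims as blocks connect, so we
                // only lower-bound the count and inspect the first generation of claims below.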
2806                 assert!(node_txn.len() > 3);
2807
2808                 assert_eq!(node_txn[0].input.len(), 1);
2809                 assert_eq!(node_txn[1].input.len(), 1);
2810                 assert_eq!(node_txn[2].input.len(), 1);
2811
2812                 check_spends!(node_txn[0], revoked_local_txn[0]);
2813                 check_spends!(node_txn[1], revoked_local_txn[0]);
2814                 check_spends!(node_txn[2], revoked_local_txn[0]);
2815
2816                 let mut witness_lens = BTreeSet::new();
2817                 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
2818                 witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len());
2819                 witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
2820                 assert_eq!(witness_lens.len(), 3);
2821                 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2822                 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2823                 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2824
2825                 // Finally, mine the penalty transactions and check that we get an HTLC failure after
2826                 // ANTI_REORG_DELAY confirmations.
2827                 mine_transaction(&nodes[1], &node_txn[0]);
2828                 mine_transaction(&nodes[1], &node_txn[1]);
2829                 mine_transaction(&nodes[1], &node_txn[2]);
2830                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2831                 expect_payment_failed!(nodes[1], payment_hash_2, false);
2832         }
2833         get_announce_close_broadcast_events(&nodes, 0, 1);
2834         assert_eq!(nodes[0].node.list_channels().len(), 0);
2835         assert_eq!(nodes[1].node.list_channels().len(), 0);
2836 }
2837
2838 #[test]
2839 fn test_htlc_on_chain_success() {
2840         // Test that in case of a unilateral on-chain close, we detect the state of the output and pass
2841         // the preimage backward accordingly. So here we test that the ChannelManager is
2842         // broadcasting the right events to the other nodes in the payment path.
2843         // We test with two HTLCs simultaneously as that was not handled correctly in the past.
2844         // A --------------------> B ----------------------> C (preimage)
2845         // First, C should claim the HTLC outputs via HTLC-Success when its own latest local
2846         // commitment transaction was broadcast.
2847         // Then, B should learn the preimage from said transactions, attempting to claim backwards
2848         // towards A.
2849         // B should be able to claim via preimage if A then broadcasts its local tx.
2850         // Finally, when A sees B's latest local commitment transaction it should be able to claim
2851         // the HTLC outputs via the preimage it learned (which, once confirmed should generate a
2852         // PaymentSent event).
2853
2854         let chanmon_cfgs = create_chanmon_cfgs(3);
2855         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2856         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2857         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2858
2859         // Create some initial channels
2860         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
2861         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
2862
2863         // Ensure all nodes are at the same height
2864         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
2865         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
2866         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
2867         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
2868
2869         // Rebalance the network a bit by relaying one payment through all the channels...
2870         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2871         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2872
2873         let (our_payment_preimage, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
2874         let (our_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
2875
2876         // Broadcast legit commitment tx from C on B's chain
2877         // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain
2878         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
2879         assert_eq!(commitment_tx.len(), 1);
2880         check_spends!(commitment_tx[0], chan_2.3);
2881         nodes[2].node.claim_funds(our_payment_preimage);
2882         expect_payment_claimed!(nodes[2], payment_hash_1, 3_000_000);
2883         nodes[2].node.claim_funds(our_payment_preimage_2);
2884         expect_payment_claimed!(nodes[2], payment_hash_2, 3_000_000);
2885         check_added_monitors!(nodes[2], 2);
2886         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2887         assert!(updates.update_add_htlcs.is_empty());
2888         assert!(updates.update_fail_htlcs.is_empty());
2889         assert!(updates.update_fail_malformed_htlcs.is_empty());
2890         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2891
2892         mine_transaction(&nodes[2], &commitment_tx[0]);
2893         check_closed_broadcast!(nodes[2], true);
2894         check_added_monitors!(nodes[2], 1);
2895         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
2896         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 2 (2 * HTLC-Success tx)
2897         assert_eq!(node_txn.len(), 2);
2898         check_spends!(node_txn[0], commitment_tx[0]);
2899         check_spends!(node_txn[1], commitment_tx[0]);
2900         assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2901         assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2902         assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2903         assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2904         assert_eq!(node_txn[0].lock_time, LockTime::ZERO);
2905         assert_eq!(node_txn[1].lock_time, LockTime::ZERO);
2906
2907         // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
2908         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()]));
2909         connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
2910         {
2911                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2912                 assert_eq!(added_monitors.len(), 1);
2913                 assert_eq!(added_monitors[0].0.txid, chan_2.3.txid());
2914                 added_monitors.clear();
2915         }
2916         let forwarded_events = nodes[1].node.get_and_clear_pending_events();
2917         assert_eq!(forwarded_events.len(), 3);
2918         match forwarded_events[0] {
2919                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
2920                 _ => panic!("Unexpected event"),
2921         }
2922         let chan_id = Some(chan_1.2);
2923         match forwarded_events[1] {
2924                 Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
2925                         next_channel_id, outbound_amount_forwarded_msat, ..
2926                 } => {
2927                         assert_eq!(total_fee_earned_msat, Some(1000));
2928                         assert_eq!(prev_channel_id, chan_id);
2929                         assert_eq!(claim_from_onchain_tx, true);
2930                         assert_eq!(next_channel_id, Some(chan_2.2));
2931                         assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
2932                 },
2933                 _ => panic!()
2934         }
2935         match forwarded_events[2] {
2936                 Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
2937                         next_channel_id, outbound_amount_forwarded_msat, ..
2938                 } => {
2939                         assert_eq!(total_fee_earned_msat, Some(1000));
2940                         assert_eq!(prev_channel_id, chan_id);
2941                         assert_eq!(claim_from_onchain_tx, true);
2942                         assert_eq!(next_channel_id, Some(chan_2.2));
2943                         assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
2944                 },
2945                 _ => panic!()
2946         }
2947         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
2948         {
2949                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2950                 assert_eq!(added_monitors.len(), 2);
2951                 assert_eq!(added_monitors[0].0.txid, chan_1.3.txid());
2952                 assert_eq!(added_monitors[1].0.txid, chan_1.3.txid());
2953                 added_monitors.clear();
2954         }
2955         assert_eq!(events.len(), 3);
2956
2957         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
2958         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
2959
2960         match nodes_2_event {
2961                 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {},
2962                 _ => panic!("Unexpected event"),
2963         }
2964
2965         match nodes_0_event {
2966                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
2967                         assert!(update_add_htlcs.is_empty());
2968                         assert!(update_fail_htlcs.is_empty());
2969                         assert_eq!(update_fulfill_htlcs.len(), 1);
2970                         assert!(update_fail_malformed_htlcs.is_empty());
2971                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
2972                 },
2973                 _ => panic!("Unexpected event"),
2974         };
2975
2976         // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
2977         match events[0] {
2978                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
2979                 _ => panic!("Unexpected event"),
2980         }
2981
2982         macro_rules! check_tx_local_broadcast {
2983                 ($node: expr, $htlc_offered: expr, $commitment_tx: expr) => { {
2984                         let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap();
2985                         assert_eq!(node_txn.len(), 2);
2986                         // Node[1]: 2 * HTLC-timeout tx
2987                         // Node[0]: 2 * HTLC-timeout tx
2988                         check_spends!(node_txn[0], $commitment_tx);
2989                         check_spends!(node_txn[1], $commitment_tx);
2990                         assert_ne!(node_txn[0].lock_time, LockTime::ZERO);
2991                         assert_ne!(node_txn[1].lock_time, LockTime::ZERO);
2992                         if $htlc_offered {
2993                                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2994                                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2995                                 assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2996                                 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2997                         } else {
2998                                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2999                                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3000                                 assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
3001                                 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
3002                         }
3003                         node_txn.clear();
3004                 } }
3005         }
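        // Hedged design note: `check_tx_local_broadcast` is a macro rather than a function so it
        // can briefly hold each caller's broadcaster lock and clear it, while `$htlc_offered`
        // selects which HTLC script length (offered vs. accepted) the node's claims must reveal.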
3006         // nodes[1] now broadcasts its own timeout-claim of the output that nodes[2] just claimed via success.
3007         check_tx_local_broadcast!(nodes[1], false, commitment_tx[0]);
3008
3009         // Broadcast legit commitment tx from A on B's chain
3010         // Broadcast preimage tx by B on the offered output from A's commitment tx on A's chain
3011         let node_a_commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
3012         check_spends!(node_a_commitment_tx[0], chan_1.3);
3013         mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
3014         check_closed_broadcast!(nodes[1], true);
3015         check_added_monitors!(nodes[1], 1);
3016         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
3017         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
3018         assert!(node_txn.len() == 1 || node_txn.len() == 3); // HTLC-Success, 2* RBF bumps of above HTLC txn
3019         let commitment_spend =
3020                 if node_txn.len() == 1 {
3021                         &node_txn[0]
3022                 } else {
3023                         // Certain `ConnectStyle`s (e.g. FullBlockViaListen) will cause RBF bumps of the
3024                         // previous HTLC transaction to be broadcast.
3025                         if node_txn[0].input[0].previous_output.txid == node_a_commitment_tx[0].txid() {
3026                                 check_spends!(node_txn[1], commitment_tx[0]);
3027                                 check_spends!(node_txn[2], commitment_tx[0]);
3028                                 assert_ne!(node_txn[1].input[0].previous_output.vout, node_txn[2].input[0].previous_output.vout);
3029                                 &node_txn[0]
3030                         } else {
3031                                 check_spends!(node_txn[0], commitment_tx[0]);
3032                                 check_spends!(node_txn[1], commitment_tx[0]);
3033                                 assert_ne!(node_txn[0].input[0].previous_output.vout, node_txn[1].input[0].previous_output.vout);
3034                                 &node_txn[2]
3035                         }
3036                 };
3037
3038         check_spends!(commitment_spend, node_a_commitment_tx[0]);
3039         assert_eq!(commitment_spend.input.len(), 2);
3040         assert_eq!(commitment_spend.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
3041         assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
3042         assert_eq!(commitment_spend.lock_time.to_consensus_u32(), nodes[1].best_block_info().1);
3043         assert!(commitment_spend.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
3044         // We don't bother to check that B can claim the HTLC output on its commitment tx here as
3045         // we already checked the same situation with A.
3046
3047         // Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
3048         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()]));
3049         connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
3050         check_closed_broadcast!(nodes[0], true);
3051         check_added_monitors!(nodes[0], 1);
3052         let events = nodes[0].node.get_and_clear_pending_events();
3053         assert_eq!(events.len(), 5);
3054         let mut first_claimed = false;
3055         for event in events {
3056                 match event {
3057                         Event::PaymentSent { payment_preimage, payment_hash, .. } => {
3058                                 if payment_preimage == our_payment_preimage && payment_hash == payment_hash_1 {
3059                                         assert!(!first_claimed);
3060                                         first_claimed = true;
3061                                 } else {
3062                                         assert_eq!(payment_preimage, our_payment_preimage_2);
3063                                         assert_eq!(payment_hash, payment_hash_2);
3064                                 }
3065                         },
3066                         Event::PaymentPathSuccessful { .. } => {},
3067                         Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
3068                         _ => panic!("Unexpected event"),
3069                 }
3070         }
3071         check_tx_local_broadcast!(nodes[0], true, node_a_commitment_tx[0]);
3072 }
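
// Hedged sketch (not upstream code): the `lock_time` assertions in this test rely on timeout
// claims being CLTV-locked (non-zero lock time, checked via `assert_ne!` above) while C's
// pre-signed HTLC-Success transactions use `LockTime::ZERO`. Under those assumptions only:
#[cfg(test)]
fn is_cltv_locked(tx: &Transaction) -> bool {
	// A non-zero lock time here means the claim must wait for the HTLC's CLTV expiry.
	tx.lock_time != LockTime::ZERO
}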
3073
3074 fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
3075         // Test that in case of a unilateral on-chain close, we detect the state of the output and
3076         // time out the HTLC backward accordingly. So here we test that the ChannelManager is
3077         // broadcasting the right events to the other nodes in the payment path.
3078         // A ------------------> B ----------------------> C (timeout)
3079         //    B's commitment tx                 C's commitment tx
3080         //            \                                  \
3081         //         B's HTLC timeout tx               B's timeout tx
3082
3083         let chanmon_cfgs = create_chanmon_cfgs(3);
3084         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3085         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3086         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3087         *nodes[0].connect_style.borrow_mut() = connect_style;
3088         *nodes[1].connect_style.borrow_mut() = connect_style;
3089         *nodes[2].connect_style.borrow_mut() = connect_style;
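        // Hedged note: `ConnectStyle` controls how the test harness feeds chain data to each node,
        // e.g. whole blocks via the `Listen` interface vs. transactions-then-headers via `Confirm`,
        // optionally skipping intermediate blocks, so the same scenario runs under several
        // block-connection orderings (see the wrapper test below).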
3090
3091         // Create some initial channels
3092         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
3093         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3094
3095         // Rebalance the network a bit by relaying one payment through all the channels...
3096         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
3097         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
3098
3099         let (_payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
3100
3101         // Broadcast legit commitment tx from C on B's chain
3102         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
3103         check_spends!(commitment_tx[0], chan_2.3);
3104         nodes[2].node.fail_htlc_backwards(&payment_hash);
3105         check_added_monitors!(nodes[2], 0);
3106         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
3107         check_added_monitors!(nodes[2], 1);
3108
3109         let events = nodes[2].node.get_and_clear_pending_msg_events();
3110         assert_eq!(events.len(), 1);
3111         match events[0] {
3112                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3113                         assert!(update_add_htlcs.is_empty());
3114                         assert!(!update_fail_htlcs.is_empty());
3115                         assert!(update_fulfill_htlcs.is_empty());
3116                         assert!(update_fail_malformed_htlcs.is_empty());
3117                         assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
3118                 },
3119                 _ => panic!("Unexpected event"),
3120         };
3121         mine_transaction(&nodes[2], &commitment_tx[0]);
3122         check_closed_broadcast!(nodes[2], true);
3123         check_added_monitors!(nodes[2], 1);
3124         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
3125         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
3126         assert_eq!(node_txn.len(), 0);
3127
3128         // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
3129         // Verify that B's ChannelManager is able to detect that the HTLC has timed out via its own tx and fail it backward accordingly
3130         mine_transaction(&nodes[1], &commitment_tx[0]);
3131         check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
3132                 [nodes[2].node.get_our_node_id()], 100000);
3133         connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
3134         let timeout_tx = {
3135                 let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
3136                 if nodes[1].connect_style.borrow().skips_blocks() {
3137                         assert_eq!(txn.len(), 1);
3138                 } else {
3139                         assert_eq!(txn.len(), 3); // Two extra fee bumps for timeout transaction
3140                 }
3141                 txn.iter().for_each(|tx| check_spends!(tx, commitment_tx[0]));
3142                 assert_eq!(txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3143                 txn.remove(0)
3144         };
3145
3146         mine_transaction(&nodes[1], &timeout_tx);
3147         check_added_monitors!(nodes[1], 1);
3148         check_closed_broadcast!(nodes[1], true);
3149
3150         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
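        // Hedged note: `ANTI_REORG_DELAY` is the confirmation depth LDK waits for before treating an
        // on-chain HTLC resolution as final; with the timeout claim mined in one block above,
        // connecting `ANTI_REORG_DELAY - 1` further blocks reaches exactly that depth and lets
        // nodes[1] fail the HTLC backward.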
3151
3152         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
3153         check_added_monitors!(nodes[1], 1);
3154         let events = nodes[1].node.get_and_clear_pending_msg_events();
3155         assert_eq!(events.len(), 1);
3156         match events[0] {
3157                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3158                         assert!(update_add_htlcs.is_empty());
3159                         assert!(!update_fail_htlcs.is_empty());
3160                         assert!(update_fulfill_htlcs.is_empty());
3161                         assert!(update_fail_malformed_htlcs.is_empty());
3162                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3163                 },
3164                 _ => panic!("Unexpected event"),
3165         };
3166
3167         // Broadcast legit commitment tx from B on A's chain
3168         let commitment_tx = get_local_commitment_txn!(nodes[1], chan_1.2);
3169         check_spends!(commitment_tx[0], chan_1.3);
3170
3171         mine_transaction(&nodes[0], &commitment_tx[0]);
3172         connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
3173
3174         check_closed_broadcast!(nodes[0], true);
3175         check_added_monitors!(nodes[0], 1);
3176         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
3177         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx
3178         assert_eq!(node_txn.len(), 1);
3179         check_spends!(node_txn[0], commitment_tx[0]);
3180         assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3181 }
3182
3183 #[test]
3184 fn test_htlc_on_chain_timeout() {
3185         do_test_htlc_on_chain_timeout(ConnectStyle::BestBlockFirstSkippingBlocks);
3186         do_test_htlc_on_chain_timeout(ConnectStyle::TransactionsFirstSkippingBlocks);
3187         do_test_htlc_on_chain_timeout(ConnectStyle::FullBlockViaListen);
3188 }
3189
3190 #[test]
3191 fn test_simple_commitment_revoked_fail_backward() {
3192         // Test that in case of a revoked commitment tx, we detect the resolution of output by justice tx
3193         // and fail backward accordingly.
3194
3195         let chanmon_cfgs = create_chanmon_cfgs(3);
3196         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3197         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3198         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3199
3200         // Create some initial channels
3201         create_announced_chan_between_nodes(&nodes, 0, 1);
3202         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3203
3204         let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3205         // Get the will-be-revoked local txn from nodes[2]
3206         let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3207         // Revoke the old state
3208         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3209
3210         let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3211
3212         mine_transaction(&nodes[1], &revoked_local_txn[0]);
3213         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
3214         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3215         check_added_monitors!(nodes[1], 1);
3216         check_closed_broadcast!(nodes[1], true);
3217
3218         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
3219         check_added_monitors!(nodes[1], 1);
3220         let events = nodes[1].node.get_and_clear_pending_msg_events();
3221         assert_eq!(events.len(), 1);
3222         match events[0] {
3223                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3224                         assert!(update_add_htlcs.is_empty());
3225                         assert_eq!(update_fail_htlcs.len(), 1);
3226                         assert!(update_fulfill_htlcs.is_empty());
3227                         assert!(update_fail_malformed_htlcs.is_empty());
3228                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3229
3230                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3231                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3232                         expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_2.0.contents.short_channel_id, true);
3233                 },
3234                 _ => panic!("Unexpected event"),
3235         }
3236 }
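
// Hedged note on the test above: the trailing `true` passed to
// `expect_payment_failed_with_update!` asserts that the failure carried a permanent
// `NetworkUpdate` for `chan_2`, i.e. the payer should drop the closed channel from its network
// graph rather than merely avoid it temporarily.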
3237
3238 fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use_dust: bool, no_to_remote: bool) {
3239         // Test that if our counterparty broadcasts a revoked commitment transaction we fail all
3240         // pending HTLCs on that channel backwards even if the HTLCs aren't present in our latest
3241         // commitment transaction anymore.
3242         // To do this, we have the peer which will broadcast a revoked commitment transaction send
3243         // a number of update_fail/commitment_signed updates without ever sending the RAA in
3244         // response to our commitment_signed. This is somewhat misbehavior-y, though not
3245         // technically disallowed and we should probably handle it reasonably.
3246         // Note that this is pretty exhaustive as an outbound HTLC which we haven't yet
3247         // failed/fulfilled backwards must be in at least one of the latest two remote commitment
3248         // transactions:
3249         // * Once we move it out of our holding cell/add it, we will immediately include it in a
3250         //   commitment_signed (implying it will be in the latest remote commitment transaction).
3251         // * Once they remove it, we will send a (the first) commitment_signed without the HTLC,
3252         //   and once they revoke the previous commitment transaction (allowing us to send a new
3253         //   commitment_signed) we will be free to fail/fulfill the HTLC backwards.
3254         let chanmon_cfgs = create_chanmon_cfgs(3);
3255         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3256         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3257         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3258
3259         // Create some initial channels
3260         create_announced_chan_between_nodes(&nodes, 0, 1);
3261         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3262
3263         let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
3264         // Get the will-be-revoked local txn from nodes[2]
3265         let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3266         assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 });
3267         // Revoke the old state
3268         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3269
3270         let value = if use_dust {
3271                 // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
3272                 // well, so HTLCs at exactly the dust limit will not be included in commitment txn.
3273                 nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id())
3274                         .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context().holder_dust_limit_satoshis * 1000
3275         } else { 3000000 };
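        // Hedged arithmetic note: assuming LDK's minimum dust limit of 354 sats applies here, `value`
        // becomes 354_000 msat, i.e. exactly at the holder's dust limit, so per the comment above the
        // HTLC is omitted from the commitment transaction in the `use_dust` case.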
3276
3277         let (_, first_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3278         let (_, second_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3279         let (_, third_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3280
3281         nodes[2].node.fail_htlc_backwards(&first_payment_hash);
3282         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
3283         check_added_monitors!(nodes[2], 1);
3284         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3285         assert!(updates.update_add_htlcs.is_empty());
3286         assert!(updates.update_fulfill_htlcs.is_empty());
3287         assert!(updates.update_fail_malformed_htlcs.is_empty());
3288         assert_eq!(updates.update_fail_htlcs.len(), 1);
3289         assert!(updates.update_fee.is_none());
3290         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3291         let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
3292         // Drop the last RAA from 3 -> 2
3293
3294         nodes[2].node.fail_htlc_backwards(&second_payment_hash);
3295         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: second_payment_hash }]);
3296         check_added_monitors!(nodes[2], 1);
3297         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3298         assert!(updates.update_add_htlcs.is_empty());
3299         assert!(updates.update_fulfill_htlcs.is_empty());
3300         assert!(updates.update_fail_malformed_htlcs.is_empty());
3301         assert_eq!(updates.update_fail_htlcs.len(), 1);
3302         assert!(updates.update_fee.is_none());
3303         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3304         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3305         check_added_monitors!(nodes[1], 1);
3306         // Note that nodes[1] is in AwaitingRAA, so won't send a CS
3307         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3308         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3309         check_added_monitors!(nodes[2], 1);
3310
3311         nodes[2].node.fail_htlc_backwards(&third_payment_hash);
3312         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: third_payment_hash }]);
3313         check_added_monitors!(nodes[2], 1);
3314         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3315         assert!(updates.update_add_htlcs.is_empty());
3316         assert!(updates.update_fulfill_htlcs.is_empty());
3317         assert!(updates.update_fail_malformed_htlcs.is_empty());
3318         assert_eq!(updates.update_fail_htlcs.len(), 1);
3319         assert!(updates.update_fee.is_none());
3320         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3321         // At this point first_payment_hash has dropped out of the latest two commitment
3322         // transactions that nodes[1] is tracking...
3323         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3324         check_added_monitors!(nodes[1], 1);
3325         // Note that nodes[1] is (still) in AwaitingRAA, so won't send a CS
3326         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3327         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3328         check_added_monitors!(nodes[2], 1);
3329
3330         // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting
3331         // on nodes[2]'s RAA.
3332         let (route, fourth_payment_hash, _, fourth_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 1000000);
3333         nodes[1].node.send_payment_with_route(&route, fourth_payment_hash,
3334                 RecipientOnionFields::secret_only(fourth_payment_secret), PaymentId(fourth_payment_hash.0)).unwrap();
3335         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3336         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3337         check_added_monitors!(nodes[1], 0);
3338
3339         if deliver_bs_raa {
3340                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_raa);
3341                 // One monitor for the new revocation preimage, and no second one, as we won't generate a
3342                 // new commitment transaction for nodes[0] until process_pending_htlc_forwards().
3343                 check_added_monitors!(nodes[1], 1);
3344                 let events = nodes[1].node.get_and_clear_pending_events();
3345                 assert_eq!(events.len(), 2);
3346                 match events[0] {
3347                         Event::HTLCHandlingFailed { .. } => { },
3348                         _ => panic!("Unexpected event"),
3349                 }
3350                 match events[1] {
3351                         Event::PendingHTLCsForwardable { .. } => { },
3352                         _ => panic!("Unexpected event"),
3353                 };
3354                 // Deliberately don't process the pending fail-back so they all fail back at once after
3355                 // block connection just like the !deliver_bs_raa case
3356         }
3357
3358         let mut failed_htlcs = new_hash_set();
3359         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3360
3361         mine_transaction(&nodes[1], &revoked_local_txn[0]);
3362         check_added_monitors!(nodes[1], 1);
3363         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3364
3365         let events = nodes[1].node.get_and_clear_pending_events();
3366         assert_eq!(events.len(), if deliver_bs_raa { 3 + nodes.len() - 1 } else { 4 + nodes.len() });
3367         assert!(events.iter().any(|ev| matches!(
3368                 ev,
3369                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. }
3370         )));
3371         assert!(events.iter().any(|ev| matches!(
3372                 ev,
3373                 Event::PaymentPathFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash
3374         )));
3375         assert!(events.iter().any(|ev| matches!(
3376                 ev,
3377                 Event::PaymentFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash
3378         )));
3379
3380         nodes[1].node.process_pending_htlc_forwards();
3381         check_added_monitors!(nodes[1], 1);
3382
3383         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
3384         assert_eq!(events.len(), if deliver_bs_raa { 4 } else { 3 });
3385
3386         if deliver_bs_raa {
3387                 let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
3388                 match nodes_2_event {
3389                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3390                                 assert_eq!(nodes[2].node.get_our_node_id(), *node_id);
3391                                 assert_eq!(update_add_htlcs.len(), 1);
3392                                 assert!(update_fulfill_htlcs.is_empty());
3393                                 assert!(update_fail_htlcs.is_empty());
3394                                 assert!(update_fail_malformed_htlcs.is_empty());
3395                         },
3396                         _ => panic!("Unexpected event"),
3397                 }
3398         }
3399
3400         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
3401         match nodes_2_event {
3402                 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { msg: Some(msgs::ErrorMessage { channel_id, ref data }) }, node_id: _ } => {
3403                         assert_eq!(channel_id, chan_2.2);
3404                         assert_eq!(data.as_str(), "Channel closed because commitment or closing transaction was confirmed on chain.");
3405                 },
3406                 _ => panic!("Unexpected event"),
3407         }
3408
3409         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
3410         match nodes_0_event {
3411                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3412                         assert!(update_add_htlcs.is_empty());
3413                         assert_eq!(update_fail_htlcs.len(), 3);
3414                         assert!(update_fulfill_htlcs.is_empty());
3415                         assert!(update_fail_malformed_htlcs.is_empty());
3416                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3417
3418                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3419                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[1]);
3420                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[2]);
3421
3422                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3423
3424                         let events = nodes[0].node.get_and_clear_pending_events();
3425                         assert_eq!(events.len(), 6);
3426                         match events[0] {
3427                                 Event::PaymentPathFailed { ref payment_hash, ref failure, .. } => {
3428                                         assert!(failed_htlcs.insert(payment_hash.0));
3429                                         // If we delivered B's RAA we got an unknown preimage error, not something
3430                                         // that we should update our routing table for.
3431                                         if !deliver_bs_raa {
3432                                                 if let PathFailure::OnPath { network_update: Some(_) } = failure { } else { panic!("Unexpected path failure") }
3433                                         }
3434                                 },
3435                                 _ => panic!("Unexpected event"),
3436                         }
3437                         match events[1] {
3438                                 Event::PaymentFailed { ref payment_hash, .. } => {
3439                                         assert_eq!(*payment_hash, first_payment_hash);
3440                                 },
3441                                 _ => panic!("Unexpected event"),
3442                         }
3443                         match events[2] {
3444                                 Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
3445                                         assert!(failed_htlcs.insert(payment_hash.0));
3446                                 },
3447                                 _ => panic!("Unexpected event"),
3448                         }
3449                         match events[3] {
3450                                 Event::PaymentFailed { ref payment_hash, .. } => {
3451                                         assert_eq!(*payment_hash, second_payment_hash);
3452                                 },
3453                                 _ => panic!("Unexpected event"),
3454                         }
3455                         match events[4] {
3456                                 Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
3457                                         assert!(failed_htlcs.insert(payment_hash.0));
3458                                 },
3459                                 _ => panic!("Unexpected event"),
3460                         }
3461                         match events[5] {
3462                                 Event::PaymentFailed { ref payment_hash, .. } => {
3463                                         assert_eq!(*payment_hash, third_payment_hash);
3464                                 },
3465                                 _ => panic!("Unexpected event"),
3466                         }
3467                 },
3468                 _ => panic!("Unexpected event"),
3469         }
3470
3471         // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
3472         match events[0] {
3473                 MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
3474                 _ => panic!("Unexpected event"),
3475         }
3476
3477         assert!(failed_htlcs.contains(&first_payment_hash.0));
3478         assert!(failed_htlcs.contains(&second_payment_hash.0));
3479         assert!(failed_htlcs.contains(&third_payment_hash.0));
3480 }
3481
3482 #[test]
3483 fn test_commitment_revoked_fail_backward_exhaustive_a() {
3484         do_test_commitment_revoked_fail_backward_exhaustive(false, true, false);
3485         do_test_commitment_revoked_fail_backward_exhaustive(true, true, false);
3486         do_test_commitment_revoked_fail_backward_exhaustive(false, false, false);
3487         do_test_commitment_revoked_fail_backward_exhaustive(true, false, false);
3488 }
3489
3490 #[test]
3491 fn test_commitment_revoked_fail_backward_exhaustive_b() {
3492         do_test_commitment_revoked_fail_backward_exhaustive(false, true, true);
3493         do_test_commitment_revoked_fail_backward_exhaustive(true, true, true);
3494         do_test_commitment_revoked_fail_backward_exhaustive(false, false, true);
3495         do_test_commitment_revoked_fail_backward_exhaustive(true, false, true);
3496 }
3497
3498 #[test]
3499 fn fail_backward_pending_htlc_upon_channel_failure() {
3500         let chanmon_cfgs = create_chanmon_cfgs(2);
3501         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3502         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3503         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3504         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
3505
3506         // Alice -> Bob: Route a payment but without Bob sending revoke_and_ack.
3507         {
3508                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000);
3509                 nodes[0].node.send_payment_with_route(&route, payment_hash, RecipientOnionFields::secret_only(payment_secret),
3510                         PaymentId(payment_hash.0)).unwrap();
3511                 check_added_monitors!(nodes[0], 1);
3512
3513                 let payment_event = {
3514                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3515                         assert_eq!(events.len(), 1);
3516                         SendEvent::from_event(events.remove(0))
3517                 };
3518                 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
3519                 assert_eq!(payment_event.msgs.len(), 1);
3520         }
3521
3522         // Alice -> Bob: Route another payment but now Alice waits for Bob's earlier revoke_and_ack.
3523         let (route, failed_payment_hash, _, failed_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000);
3524         {
3525                 nodes[0].node.send_payment_with_route(&route, failed_payment_hash,
3526                         RecipientOnionFields::secret_only(failed_payment_secret), PaymentId(failed_payment_hash.0)).unwrap();
3527                 check_added_monitors!(nodes[0], 0);
3528
3529                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3530         }
3531
3532         // Alice <- Bob: Send a malformed update_add_htlc so Alice fails the channel.
3533         {
3534                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 50_000);
3535
3536                 let secp_ctx = Secp256k1::new();
3537                 let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
3538                 let current_height = nodes[1].node.best_block.read().unwrap().height + 1;
3539                 let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret);
3540                 let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(
3541                         &route.paths[0], 50_000, &recipient_onion_fields, current_height, &None).unwrap();
3542                 let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
3543                 let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
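                 // Building the onion by hand (rather than going through send_payment) lets us
                 // inject an update_add_htlc that send_payment would never produce; the fixed
                 // session_priv merely makes the construction deterministic.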
3544
3545                 // Send a 0-msat update_add_htlc to fail the channel.
3546                 let update_add_htlc = msgs::UpdateAddHTLC {
3547                         channel_id: chan.2,
3548                         htlc_id: 0,
3549                         amount_msat: 0,
3550                         payment_hash,
3551                         cltv_expiry,
3552                         onion_routing_packet,
3553                         skimmed_fee_msat: None,
3554                         blinding_point: None,
3555                 };
3556                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
3557         }
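         // Per BOLT 2, an update_add_htlc with amount_msat == 0 is invalid and the recipient is
         // expected to fail the channel, which is exactly what the events below assert.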
3558         let events = nodes[0].node.get_and_clear_pending_events();
3559         assert_eq!(events.len(), 3);
3560         // Check that Alice fails backward the pending HTLC from the second payment.
3561         match events[0] {
3562                 Event::PaymentPathFailed { payment_hash, .. } => {
3563                         assert_eq!(payment_hash, failed_payment_hash);
3564                 },
3565                 _ => panic!("Unexpected event"),
3566         }
3567         match events[1] {
3568                 Event::PaymentFailed { payment_hash, .. } => {
3569                         assert_eq!(payment_hash, failed_payment_hash);
3570                 },
3571                 _ => panic!("Unexpected event"),
3572         }
3573         match events[2] {
3574                 Event::ChannelClosed { reason: ClosureReason::ProcessingError { ref err }, .. } => {
3575                         assert_eq!(err, "Remote side tried to send a 0-msat HTLC");
3576                 },
3577                 _ => panic!("Unexpected event {:?}", events[2]),
3578         }
3579         check_closed_broadcast!(nodes[0], true);
3580         check_added_monitors!(nodes[0], 1);
3581 }
3582
3583 #[test]
3584 fn test_htlc_ignore_latest_remote_commitment() {
3585         // Test that HTLC transactions spending the latest remote commitment transaction are simply
3586         // ignored if we cannot claim them. This originally tickled an invalid unwrap().
3587         let chanmon_cfgs = create_chanmon_cfgs(2);
3588         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3589         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3590         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3591         if *nodes[1].connect_style.borrow() == ConnectStyle::FullBlockViaListen {
3592                 // We rely on the ability to connect a block redundantly, which isn't allowed via
3593                 // `chain::Listen`, so we never run the test if we randomly get assigned that
3594                 // connect_style.
3595                 return;
3596         }
3597         let funding_tx = create_announced_chan_between_nodes(&nodes, 0, 1).3;
3598
3599         route_payment(&nodes[0], &[&nodes[1]], 10000000);
3600         nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
3601         connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
3602         check_closed_broadcast!(nodes[0], true);
3603         check_added_monitors!(nodes[0], 1);
3604         check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
3605
3606         let node_txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
3607         assert_eq!(node_txn.len(), 2);
3608         check_spends!(node_txn[0], funding_tx);
3609         check_spends!(node_txn[1], node_txn[0]);
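         // Per the check_spends assertions, node_txn[0] is our commitment transaction (spending
         // the funding output) and node_txn[1] is the HTLC-timeout transaction claiming our
         // offered HTLC back from it.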
3610
3611         let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone()]);
3612         connect_block(&nodes[1], &block);
3613         check_closed_broadcast!(nodes[1], true);
3614         check_added_monitors!(nodes[1], 1);
3615         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
3616
3617         // Duplicate the connect_block call since this may happen due to other listeners
3618         // registering new transactions
3619         connect_block(&nodes[1], &block);
3620 }
3621
3622 #[test]
3623 fn test_force_close_fail_back() {
3624         // Check which HTLCs are failed-backwards on channel force-closure
3625         let chanmon_cfgs = create_chanmon_cfgs(3);
3626         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3627         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3628         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3629         create_announced_chan_between_nodes(&nodes, 0, 1);
3630         create_announced_chan_between_nodes(&nodes, 1, 2);
3631
3632         let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
3633
3634         let mut payment_event = {
3635                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
3636                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
3637                 check_added_monitors!(nodes[0], 1);
3638
3639                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3640                 assert_eq!(events.len(), 1);
3641                 SendEvent::from_event(events.remove(0))
3642         };
3643
3644         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3645         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
3646
3647         expect_pending_htlcs_forwardable!(nodes[1]);
3648
3649         let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
3650         assert_eq!(events_2.len(), 1);
3651         payment_event = SendEvent::from_event(events_2.remove(0));
3652         assert_eq!(payment_event.msgs.len(), 1);
3653
3654         check_added_monitors!(nodes[1], 1);
3655         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
3656         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
3657         check_added_monitors!(nodes[2], 1);
3658         let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3659
3660         // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
3661         // state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
3662         // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
3663
3664         nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
3665         check_closed_broadcast!(nodes[2], true);
3666         check_added_monitors!(nodes[2], 1);
3667         check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
3668         let commitment_tx = {
3669                 let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
3670                 // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
3671                 // have a use for it unless nodes[2] learns the preimage somehow; otherwise the funds
3672                 // will simply go back to nodes[1] upon timeout.
3673                 assert_eq!(node_txn.len(), 1);
3674                 node_txn.remove(0)
3675         };
3676
3677         mine_transaction(&nodes[1], &commitment_tx);
3678
3679         // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
3680         check_closed_broadcast!(nodes[1], true);
3681         check_added_monitors!(nodes[1], 1);
3682         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
3683
3684         // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
3685         {
3686                 get_monitor!(nodes[2], payment_event.commitment_msg.channel_id)
3687                         .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger);
3688         }
3689         mine_transaction(&nodes[2], &commitment_tx);
3690         let mut node_txn = nodes[2].tx_broadcaster.txn_broadcast();
3691         assert_eq!(node_txn.len(), if nodes[2].connect_style.borrow().updates_best_block_first() { 2 } else { 1 });
3692         let htlc_tx = node_txn.pop().unwrap();
3693         assert_eq!(htlc_tx.input.len(), 1);
3694         assert_eq!(htlc_tx.input[0].previous_output.txid, commitment_tx.txid());
3695         assert_eq!(htlc_tx.lock_time, LockTime::ZERO); // Must be an HTLC-Success
3696         assert_eq!(htlc_tx.input[0].witness.len(), 5); // Must be an HTLC-Success
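         // Per BOLT 3, an HTLC-Success spend's witness is `0 <remotehtlcsig> <localhtlcsig>
         // <payment_preimage> <witness_script>` (five elements) with locktime zero, whereas an
         // HTLC-Timeout sets its locktime to the HTLC's cltv_expiry, hence the two assertions above.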
3697
3698         check_spends!(htlc_tx, commitment_tx);
3699 }
3700
3701 #[test]
3702 fn test_dup_events_on_peer_disconnect() {
3703         // Test that if we receive a duplicative update_fulfill_htlc message after a reconnect we do
3704         // not generate a corresponding duplicative PaymentSent event. This was not always the case,
3705         // as we previously generated the event immediately upon receipt of the payment preimage in
3706         // the update_fulfill_htlc message.
3707
3708         let chanmon_cfgs = create_chanmon_cfgs(2);
3709         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3710         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3711         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3712         create_announced_chan_between_nodes(&nodes, 0, 1);
3713
3714         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
3715
3716         nodes[1].node.claim_funds(payment_preimage);
3717         expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
3718         check_added_monitors!(nodes[1], 1);
3719         let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3720         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
3721         expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
3722
3723         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3724         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3725
3726         let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3727         reconnect_args.pending_htlc_claims.0 = 1;
3728         reconnect_nodes(reconnect_args);
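         // The reconnect helper expects one duplicate update_fulfill_htlc to be re-delivered; the
         // key assertion is that nodes[0] then only generates a PaymentPathSuccessful, not a
         // second PaymentSent.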
3729         expect_payment_path_successful!(nodes[0]);
3730 }
3731
3732 #[test]
3733 fn test_peer_disconnected_before_funding_broadcasted() {
3734         // Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
3735         // before the funding transaction has been broadcasted and the peer doesn't reconnect in time.
3736         let chanmon_cfgs = create_chanmon_cfgs(2);
3737         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3738         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3739         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3740
3741         // Open a channel between `nodes[0]` and `nodes[1]`, for which the funding transaction is never
3742         // broadcasted, even though it's created by `nodes[0]`.
3743         let expected_temporary_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
3744         let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
3745         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
3746         let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
3747         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
3748
3749         let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
3750         assert_eq!(temporary_channel_id, expected_temporary_channel_id);
3751
3752         assert!(nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
3753
3754         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
3755         assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id);
3756
3757         // Even though the funding transaction is created by `nodes[0]`, the `FundingCreated` msg is
3758         // never sent to `nodes[1]`, and therefore the tx is never signed by either party nor
3759         // broadcasted.
3760         {
3761                 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
3762         }
3763
3764         // The peers disconnect before the funding is broadcasted.
3765         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3766         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3767
3768         // The time for peers to reconnect expires.
3769         for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
3770                 nodes[0].node.timer_tick_occurred();
3771         }
3772
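         // UNFUNDED_CHANNEL_AGE_LIMIT_TICKS is, roughly, the number of timer ticks after which
         // ChannelManager gives up on a channel whose funding it never saw broadcasted.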
3773         // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` and a
3774         // `DiscardFunding` event when the peers are disconnected and do not reconnect before the
3775         // funding transaction is broadcasted.
3776         check_closed_event!(&nodes[0], 2, ClosureReason::DisconnectedPeer, true,
3777                 [nodes[1].node.get_our_node_id()], 1000000);
3778         check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false,
3779                 [nodes[0].node.get_our_node_id()], 1000000);
3780 }
3781
3782 #[test]
3783 fn test_simple_peer_disconnect() {
3784         // Test that we can reconnect when there are no lost messages
3785         let chanmon_cfgs = create_chanmon_cfgs(3);
3786         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3787         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3788         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3789         create_announced_chan_between_nodes(&nodes, 0, 1);
3790         create_announced_chan_between_nodes(&nodes, 1, 2);
3791
3792         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3793         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3794         let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3795         reconnect_args.send_channel_ready = (true, true);
3796         reconnect_nodes(reconnect_args);
3797
3798         let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000).0;
3799         let payment_hash_2 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000).1;
3800         fail_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_hash_2);
3801         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_1);
3802
3803         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3804         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3805         reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3806
3807         let (payment_preimage_3, payment_hash_3, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
3808         let payment_preimage_4 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000).0;
3809         let payment_hash_5 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000).1;
3810         let payment_hash_6 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000).1;
3811
3812         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3813         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3814
3815         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
3816         fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);
3817
3818         let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3819         reconnect_args.pending_cell_htlc_fails.0 = 1;
3820         reconnect_args.pending_cell_htlc_claims.0 = 1;
3821         reconnect_nodes(reconnect_args);
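         // The claim and fail above were performed while disconnected, so on reconnect the helper
         // expects one re-delivered fulfill and one re-delivered fail out of the holding cell
         // (hence the pending_cell_* flags); the four events checked below are the result.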
3822         {
3823                 let events = nodes[0].node.get_and_clear_pending_events();
3824                 assert_eq!(events.len(), 4);
3825                 match events[0] {
3826                         Event::PaymentSent { payment_preimage, payment_hash, .. } => {
3827                                 assert_eq!(payment_preimage, payment_preimage_3);
3828                                 assert_eq!(payment_hash, payment_hash_3);
3829                         },
3830                         _ => panic!("Unexpected event"),
3831                 }
3832                 match events[1] {
3833                         Event::PaymentPathSuccessful { .. } => {},
3834                         _ => panic!("Unexpected event"),
3835                 }
3836                 match events[2] {
3837                         Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } => {
3838                                 assert_eq!(payment_hash, payment_hash_5);
3839                                 assert!(payment_failed_permanently);
3840                         },
3841                         _ => panic!("Unexpected event"),
3842                 }
3843                 match events[3] {
3844                         Event::PaymentFailed { payment_hash, .. } => {
3845                                 assert_eq!(payment_hash, payment_hash_5);
3846                         },
3847                         _ => panic!("Unexpected event"),
3848                 }
3849         }
3850         check_added_monitors(&nodes[0], 1);
3851
3852         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_4);
3853         fail_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_hash_6);
3854 }
3855
3856 fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken_lnd: bool) {
3857         // Test that we can reconnect when in-flight HTLC updates get dropped
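         // A quick map of messages_delivered to how far the HTLC-add dance below gets before the
         // disconnect (derived from the delivery branches in this function):
         //   0: nodes[1] never even receives channel_ready
         //   1: channel set up, but no HTLC messages delivered
         //   2: update_add_htlc delivered
         //   3: + commitment_signed delivered
         //   4: + nodes[1]'s revoke_and_ack delivered
         //   5: + nodes[1]'s commitment_signed delivered
         //   6: + nodes[0]'s final revoke_and_ack delivered (nothing lost)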
3858         let chanmon_cfgs = create_chanmon_cfgs(2);
3859         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3860         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3861         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3862
3863         let mut as_channel_ready = None;
3864         let channel_id = if messages_delivered == 0 {
3865                 let (channel_ready, chan_id, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001);
3866                 as_channel_ready = Some(channel_ready);
3867                 // nodes[1] doesn't receive the channel_ready message (it'll be re-sent on reconnect)
3868                 // Note that we store it so that if we're running with `simulate_broken_lnd` we can deliver
3869                 // it before the channel_reestablish message.
3870                 chan_id
3871         } else {
3872                 create_announced_chan_between_nodes(&nodes, 0, 1).2
3873         };
3874
3875         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000);
3876
3877         let payment_event = {
3878                 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
3879                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
3880                 check_added_monitors!(nodes[0], 1);
3881
3882                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3883                 assert_eq!(events.len(), 1);
3884                 SendEvent::from_event(events.remove(0))
3885         };
3886         assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);
3887
3888         if messages_delivered < 2 {
3889                 // Drop the payment_event messages, and let them get re-generated in reconnect_nodes!
3890         } else {
3891                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3892                 if messages_delivered >= 3 {
3893                         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
3894                         check_added_monitors!(nodes[1], 1);
3895                         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3896
3897                         if messages_delivered >= 4 {
3898                                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
3899                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3900                                 check_added_monitors!(nodes[0], 1);
3901
3902                                 if messages_delivered >= 5 {
3903                                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
3904                                         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
3905                                         // No commitment_signed so get_event_msg's assert(len == 1) passes
3906                                         check_added_monitors!(nodes[0], 1);
3907
3908                                         if messages_delivered >= 6 {
3909                                                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
3910                                                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3911                                                 check_added_monitors!(nodes[1], 1);
3912                                         }
3913                                 }
3914                         }
3915                 }
3916         }
3917
3918         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3919         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3920         if messages_delivered < 3 {
3921                 if simulate_broken_lnd {
3922                         // lnd has a long-standing bug where they send a channel_ready prior to a
3923                         // channel_reestablish if you reconnect prior to channel_ready time.
3924                         //
3925                         // Here we simulate that behavior, delivering a channel_ready immediately on
3926                         // reconnect. Note that we don't bother skipping the now-duplicate channel_ready sent
3927                         // in `reconnect_nodes` but we currently don't fail based on that.
3928                         //
3929                 // See also <https://github.com/lightningnetwork/lnd/issues/4006>
3930                         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready.as_ref().unwrap().0);
3931                 }
3932                 // Even if the channel_ready messages get exchanged, as long as nothing further was
3933                 // received on either side, both sides will need to resend them.
3934                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3935                 reconnect_args.send_channel_ready = (true, true);
3936                 reconnect_args.pending_htlc_adds.1 = 1;
3937                 reconnect_nodes(reconnect_args);
3938         } else if messages_delivered == 3 {
3939                 // nodes[0] still wants its RAA + commitment_signed
3940                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3941                 reconnect_args.pending_responding_commitment_signed.0 = true;
3942                 reconnect_args.pending_raa.0 = true;
3943                 reconnect_nodes(reconnect_args);
3944         } else if messages_delivered == 4 {
3945                 // nodes[0] still wants its commitment_signed
3946                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3947                 reconnect_args.pending_responding_commitment_signed.0 = true;
3948                 reconnect_nodes(reconnect_args);
3949         } else if messages_delivered == 5 {
3950                 // nodes[1] still wants its final RAA
3951                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
3952                 reconnect_args.pending_raa.1 = true;
3953                 reconnect_nodes(reconnect_args);
3954         } else if messages_delivered == 6 {
3955                 // Everything was delivered...
3956                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3957         }
3958
3959         let events_1 = nodes[1].node.get_and_clear_pending_events();
3960         if messages_delivered == 0 {
3961                 assert_eq!(events_1.len(), 2);
3962                 match events_1[0] {
3963                         Event::ChannelReady { .. } => { },
3964                         _ => panic!("Unexpected event"),
3965                 };
3966                 match events_1[1] {
3967                         Event::PendingHTLCsForwardable { .. } => { },
3968                         _ => panic!("Unexpected event"),
3969                 };
3970         } else {
3971                 assert_eq!(events_1.len(), 1);
3972                 match events_1[0] {
3973                         Event::PendingHTLCsForwardable { .. } => { },
3974                         _ => panic!("Unexpected event"),
3975                 };
3976         }
3977
3978         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
3979         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
3980         reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
3981
3982         nodes[1].node.process_pending_htlc_forwards();
3983
3984         let events_2 = nodes[1].node.get_and_clear_pending_events();
3985         assert_eq!(events_2.len(), 1);
3986         match events_2[0] {
3987                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
3988                         assert_eq!(payment_hash_1, *payment_hash);
3989                         assert_eq!(amount_msat, 1_000_000);
3990                         assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
3991                         assert_eq!(via_channel_id, Some(channel_id));
3992                         match &purpose {
3993                                 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
3994                                         assert!(payment_preimage.is_none());
3995                                         assert_eq!(payment_secret_1, *payment_secret);
3996                                 },
3997                                 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
3998                         }
3999                 },
4000                 _ => panic!("Unexpected event"),
4001         }
4002
4003         nodes[1].node.claim_funds(payment_preimage_1);
4004         check_added_monitors!(nodes[1], 1);
4005         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
4006
4007         let events_3 = nodes[1].node.get_and_clear_pending_msg_events();
4008         assert_eq!(events_3.len(), 1);
4009         let (update_fulfill_htlc, commitment_signed) = match events_3[0] {
4010                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
4011                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
4012                         assert!(updates.update_add_htlcs.is_empty());
4013                         assert!(updates.update_fail_htlcs.is_empty());
4014                         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
4015                         assert!(updates.update_fail_malformed_htlcs.is_empty());
4016                         assert!(updates.update_fee.is_none());
4017                         (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
4018                 },
4019                 _ => panic!("Unexpected event"),
4020         };
4021
4022         if messages_delivered >= 1 {
4023                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc);
4024
4025                 let events_4 = nodes[0].node.get_and_clear_pending_events();
4026                 assert_eq!(events_4.len(), 1);
4027                 match events_4[0] {
4028                         Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
4029                                 assert_eq!(payment_preimage_1, *payment_preimage);
4030                                 assert_eq!(payment_hash_1, *payment_hash);
4031                         },
4032                         _ => panic!("Unexpected event"),
4033                 }
4034
4035                 if messages_delivered >= 2 {
4036                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
4037                         check_added_monitors!(nodes[0], 1);
4038                         let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4039
4040                         if messages_delivered >= 3 {
4041                                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
4042                                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4043                                 check_added_monitors!(nodes[1], 1);
4044
4045                                 if messages_delivered >= 4 {
4046                                         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed);
4047                                         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4048                                         // No commitment_signed so get_event_msg's assert(len == 1) passes
4049                                         check_added_monitors!(nodes[1], 1);
4050
4051                                         if messages_delivered >= 5 {
4052                                                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
4053                                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4054                                                 check_added_monitors!(nodes[0], 1);
4055                                         }
4056                                 }
4057                         }
4058                 }
4059         }
4060
4061         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4062         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4063         if messages_delivered < 2 {
4064                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4065                 reconnect_args.pending_htlc_claims.0 = 1;
4066                 reconnect_nodes(reconnect_args);
4067                 if messages_delivered < 1 {
4068                         expect_payment_sent!(nodes[0], payment_preimage_1);
4069                 } else {
4070                         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4071                 }
4072         } else if messages_delivered == 2 {
4073                 // nodes[0] still wants its RAA + commitment_signed
4074                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4075                 reconnect_args.pending_responding_commitment_signed.1 = true;
4076                 reconnect_args.pending_raa.1 = true;
4077                 reconnect_nodes(reconnect_args);
4078         } else if messages_delivered == 3 {
4079                 // nodes[0] still wants its commitment_signed
4080                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4081                 reconnect_args.pending_responding_commitment_signed.1 = true;
4082                 reconnect_nodes(reconnect_args);
4083         } else if messages_delivered == 4 {
4084                 // nodes[1] still wants its final RAA
4085                 let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
4086                 reconnect_args.pending_raa.0 = true;
4087                 reconnect_nodes(reconnect_args);
4088         } else if messages_delivered == 5 {
4089                 // Everything was delivered...
4090                 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
4091         }
4092
4093         if messages_delivered == 1 || messages_delivered == 2 {
4094                 expect_payment_path_successful!(nodes[0]);
4095         }
4096         if messages_delivered <= 5 {
4097                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4098                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4099         }
4100         reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
4101
4102         if messages_delivered > 2 {
4103                 expect_payment_path_successful!(nodes[0]);
4104         }
4105
4106         // Channel should still work fine...
4107         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
4108         let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
4109         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
4110 }
4111
4112 #[test]
4113 fn test_drop_messages_peer_disconnect_a() {
4114         do_test_drop_messages_peer_disconnect(0, true);
4115         do_test_drop_messages_peer_disconnect(0, false);
4116         do_test_drop_messages_peer_disconnect(1, false);
4117         do_test_drop_messages_peer_disconnect(2, false);
4118 }
4119
4120 #[test]
4121 fn test_drop_messages_peer_disconnect_b() {
4122         do_test_drop_messages_peer_disconnect(3, false);
4123         do_test_drop_messages_peer_disconnect(4, false);
4124         do_test_drop_messages_peer_disconnect(5, false);
4125         do_test_drop_messages_peer_disconnect(6, false);
4126 }
4127
4128 #[test]
4129 fn test_channel_ready_without_best_block_updated() {
4130         // Previously, if we were offline when a funding transaction was locked in, and upon coming
4131         // back online called best_block_updated once followed by transactions_confirmed, we would not
4132         // generate a channel_ready until a later best_block_updated. This tests that we generate the
4133         // channel_ready immediately instead.
4134         let chanmon_cfgs = create_chanmon_cfgs(2);
4135         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4136         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4137         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4138         *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
4139
4140         let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
4141
4142         let conf_height = nodes[0].best_block_info().1 + 1;
4143         connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
4144         let block_txn = [funding_tx];
4145         let conf_txn: Vec<_> = block_txn.iter().enumerate().collect();
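         // Confirm::transactions_confirmed takes (position-in-block, transaction) pairs, which is
         // why the txn are enumerate()d here. A minimal sketch of the same call shape (names
         // hypothetical):
         //   let txdata: Vec<(usize, &Transaction)> = block_txn.iter().enumerate().collect();
         //   channel_manager.transactions_confirmed(&header, &txdata, height);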
4146         let conf_block_header = nodes[0].get_block_header(conf_height);
4147         nodes[0].node.transactions_confirmed(&conf_block_header, &conf_txn[..], conf_height);
4148
4149         // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
4150         let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
4151         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
4152 }
4153
4154 #[test]
4155 fn test_channel_monitor_skipping_block_when_channel_manager_is_leading() {
4156         let chanmon_cfgs = create_chanmon_cfgs(2);
4157         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4158         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4159         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4160
4161         // Let channel_manager get ahead of chain_monitor by 1 block.
4162         // This emulates a race condition in which a newly added channel_monitor skips one block
4163         // because the client calls block_connected on the channel_manager first and then on the chain_monitor.
4164         let height_1 = nodes[0].best_block_info().1 + 1;
4165         let mut block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
4166
4167         nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
4168         nodes[0].node.block_connected(&block_1, height_1);
4169
4170         // Create channel, and it gets added to chain_monitor in funding_created.
4171         let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
4172
4173         // Now, the newly added channel_monitor in chain_monitor hasn't processed block_1, but its
4174         // best_block is block_1, since that was populated by channel_manager, and channel_manager
4175         // was running ahead of chain_monitor at the time of funding_created.
4176         // Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
4177         // Hence, this channel's channel_monitor skipped block_1 and directly processes subsequent blocks.
4178         confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
4179         connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
4180
4181         // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
4182         let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
4183         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
4184 }
4185
4186 #[test]
4187 fn test_channel_monitor_skipping_block_when_channel_manager_is_lagging() {
4188         let chanmon_cfgs = create_chanmon_cfgs(2);
4189         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4190         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4191         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4192
4193         // Let chain_monitor get ahead of channel_manager by 1 block.
4194         // This emulates a race condition in which a newly added channel_monitor skips one block
4195         // because the client calls block_connected on the chain_monitor first and then on the channel_manager.
4196         let height_1 = nodes[0].best_block_info().1 + 1;
4197         let mut block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
4198
4199         nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
4200         nodes[0].chain_monitor.chain_monitor.block_connected(&block_1, height_1);
4201
4202         // Create channel, and it gets added to chain_monitor in funding_created.
4203         let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
4204
4205         // channel_manager can't really skip block_1; it should get it eventually.
4206         nodes[0].node.block_connected(&block_1, height_1);
4207
4208         // Now, the newly added channel_monitor in chain_monitor hasn't processed block_1; its
4209         // best_block is the block before block_1, since that was populated by channel_manager, and
4210         // channel_manager was running behind at the time of funding_created.
4211         // Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
4212         // Hence, this channel's channel_monitor skipped block_1 and directly processes subsequent blocks.
4213         confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
4214         connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
4215
4216         // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
4217         let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
4218         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
4219 }
4220
4221 #[test]
4222 fn test_drop_messages_peer_disconnect_dual_htlc() {
4223         // Test that we can handle reconnecting when both sides of a channel have pending
4224         // commitment_updates when we disconnect.
4225         let chanmon_cfgs = create_chanmon_cfgs(2);
4226         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4227         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4228         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4229         create_announced_chan_between_nodes(&nodes, 0, 1);
4230
4231         let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
4232
4233         // Now send a second payment whose update messages will be dropped when the peers disconnect below
4234         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
4235         nodes[0].node.send_payment_with_route(&route, payment_hash_2,
4236                 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
4237         check_added_monitors!(nodes[0], 1);
4238
4239         let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
4240         assert_eq!(events_1.len(), 1);
4241         match events_1[0] {
4242                 MessageSendEvent::UpdateHTLCs { .. } => {},
4243                 _ => panic!("Unexpected event"),
4244         }
4245
4246         nodes[1].node.claim_funds(payment_preimage_1);
4247         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
4248         check_added_monitors!(nodes[1], 1);
4249
4250         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
4251         assert_eq!(events_2.len(), 1);
4252         match events_2[0] {
4253                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
4254                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
4255                         assert!(update_add_htlcs.is_empty());
4256                         assert_eq!(update_fulfill_htlcs.len(), 1);
4257                         assert!(update_fail_htlcs.is_empty());
4258                         assert!(update_fail_malformed_htlcs.is_empty());
4259                         assert!(update_fee.is_none());
4260
4261                         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
4262                         let events_3 = nodes[0].node.get_and_clear_pending_events();
4263                         assert_eq!(events_3.len(), 1);
4264                         match events_3[0] {
4265                                 Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
4266                                         assert_eq!(*payment_preimage, payment_preimage_1);
4267                                         assert_eq!(*payment_hash, payment_hash_1);
4268                                 },
4269                                 _ => panic!("Unexpected event"),
4270                         }
4271
4272                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
4273                         let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4274                         // No commitment_signed so get_event_msg's assert(len == 1) passes
4275                         check_added_monitors!(nodes[0], 1);
4276                 },
4277                 _ => panic!("Unexpected event"),
4278         }
4279
4280         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
4281         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
4282
4283         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
4284                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
4285         }, true).unwrap();
4286         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
4287         assert_eq!(reestablish_1.len(), 1);
4288         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
4289                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
4290         }, false).unwrap();
4291         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
4292         assert_eq!(reestablish_2.len(), 1);
4293
4294         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
4295         let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
4296         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
4297         let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
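         // handle_chan_reestablish_msgs! returns, roughly, (channel_ready, revoke_and_ack,
         // commitment update, RAA/commitment order); the index-based assertions below pick that
         // tuple apart.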
4298
4299         assert!(as_resp.0.is_none());
4300         assert!(bs_resp.0.is_none());
4301
4302         assert!(bs_resp.1.is_none());
4303         assert!(bs_resp.2.is_none());
4304
4305         assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
4306
4307         assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1);
4308         assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty());
4309         assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
4310         assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
4311         assert!(as_resp.2.as_ref().unwrap().update_fee.is_none());
4312         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]);
4313         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed);
4314         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4315         // No commitment_signed so get_event_msg's assert(len == 1) passes
4316         check_added_monitors!(nodes[1], 1);
4317
4318         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap());
4319         let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4320         assert!(bs_second_commitment_signed.update_add_htlcs.is_empty());
4321         assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty());
4322         assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty());
4323         assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty());
4324         assert!(bs_second_commitment_signed.update_fee.is_none());
4325         check_added_monitors!(nodes[1], 1);
4326
4327         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
4328         let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4329         assert!(as_commitment_signed.update_add_htlcs.is_empty());
4330         assert!(as_commitment_signed.update_fulfill_htlcs.is_empty());
4331         assert!(as_commitment_signed.update_fail_htlcs.is_empty());
4332         assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty());
4333         assert!(as_commitment_signed.update_fee.is_none());
4334         check_added_monitors!(nodes[0], 1);
4335
4336         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed);
4337         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4338         // No commitment_signed so get_event_msg's assert(len == 1) passes
4339         check_added_monitors!(nodes[0], 1);
4340
4341         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed);
4342         let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4343         // No commitment_signed so get_event_msg's assert(len == 1) passes
4344         check_added_monitors!(nodes[1], 1);
4345
4346         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
4347         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4348         check_added_monitors!(nodes[1], 1);
4349
4350         expect_pending_htlcs_forwardable!(nodes[1]);
4351
4352         let events_5 = nodes[1].node.get_and_clear_pending_events();
4353         assert_eq!(events_5.len(), 1);
4354         match events_5[0] {
4355                 Event::PaymentClaimable { ref payment_hash, ref purpose, .. } => {
4356                         assert_eq!(payment_hash_2, *payment_hash);
4357                         match &purpose {
4358                                 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
4359                                         assert!(payment_preimage.is_none());
4360                                         assert_eq!(payment_secret_2, *payment_secret);
4361                                 },
4362                                 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
4363                         }
4364                 },
4365                 _ => panic!("Unexpected event"),
4366         }
4367
4368         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
4369         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4370         check_added_monitors!(nodes[0], 1);
4371
4372         expect_payment_path_successful!(nodes[0]);
4373         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
4374 }
4375
4376 fn do_test_htlc_timeout(send_partial_mpp: bool) {
4377         // If the user fails to claim/fail an HTLC within the HTLC CLTV timeout we fail it for them
4378         // to avoid our counterparty failing the channel.
4379         let chanmon_cfgs = create_chanmon_cfgs(2);
4380         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4381         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4382         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4383
4384         create_announced_chan_between_nodes(&nodes, 0, 1);
4385
4386         let our_payment_hash = if send_partial_mpp {
4387                 let (route, our_payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000);
4388                 // Use the utility function send_payment_along_path to send the payment with MPP data which
4389                 // indicates there are more HTLCs coming.
4390                 let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
4391                 let payment_id = PaymentId([42; 32]);
4392                 let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
4393                         RecipientOnionFields::secret_only(payment_secret), payment_id, &route).unwrap();
4394                 nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash,
4395                         RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id,
4396                         &None, session_privs[0]).unwrap();
4397                 check_added_monitors!(nodes[0], 1);
4398                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
4399                 assert_eq!(events.len(), 1);
4400                 // Now do the relevant commitment_signed/RAA dances along the path, noting that the final
4401                 // hop should *not* yet generate any PaymentClaimable event(s).
4402                 pass_along_path(&nodes[0], &[&nodes[1]], 100000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
4403                 our_payment_hash
4404         } else {
4405                 route_payment(&nodes[0], &[&nodes[1]], 100000).1
4406         };
4407
4408         let mut block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
4409         connect_block(&nodes[0], &block);
4410         connect_block(&nodes[1], &block);
4411         let block_count = TEST_FINAL_CLTV + CHAN_CONFIRM_DEPTH + 2 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS;
4412         for _ in CHAN_CONFIRM_DEPTH + 2..block_count {
4413                 block.header.prev_blockhash = block.block_hash();
4414                 connect_block(&nodes[0], &block);
4415                 connect_block(&nodes[1], &block);
4416         }
4417
4418         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
4419
4420         check_added_monitors!(nodes[1], 1);
4421         let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4422         assert!(htlc_timeout_updates.update_add_htlcs.is_empty());
4423         assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1);
4424         assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty());
4425         assert!(htlc_timeout_updates.update_fee.is_none());
4426
4427         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]);
4428         commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false);
4429         // 100_000 msat as u64, followed by the height at which we failed back above
4430         let mut expected_failure_data = (100_000 as u64).to_be_bytes().to_vec();
4431         expected_failure_data.extend_from_slice(&(block_count - 1).to_be_bytes());
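         // 0x4000 | 15 is PERM|incorrect_or_unknown_payment_details; per BOLT 4 its failure data
         // is the htlc_msat followed by the failing height, matching expected_failure_data above.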
4432         expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]);
4433 }
4434
4435 #[test]
4436 fn test_htlc_timeout() {
4437         do_test_htlc_timeout(true);
4438         do_test_htlc_timeout(false);
4439 }
4440
4441 fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
4442         // Tests that HTLCs in the holding cell are timed out after the requisite number of blocks.
4443         let chanmon_cfgs = create_chanmon_cfgs(3);
4444         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
4445         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
4446         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
4447         create_announced_chan_between_nodes(&nodes, 0, 1);
4448         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4449
4450         // Make sure all nodes are at the same starting height
4451         connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
4452         connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
4453         connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
4454
4455         // Route a first payment to get the 1 -> 2 channel in awaiting_raa...
4456         let (route, first_payment_hash, _, first_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
4457         nodes[1].node.send_payment_with_route(&route, first_payment_hash,
4458                 RecipientOnionFields::secret_only(first_payment_secret), PaymentId(first_payment_hash.0)).unwrap();
4459         assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
4460         check_added_monitors!(nodes[1], 1);
4461
4462         // Now attempt to route a second payment, which should be placed in the holding cell
4463         let sending_node = if forwarded_htlc { &nodes[0] } else { &nodes[1] };
4464         let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(sending_node, nodes[2], 100000);
4465         sending_node.node.send_payment_with_route(&route, second_payment_hash,
4466                 RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap();
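        // Because the 1 -> 2 channel is still awaiting the RAA for the first payment, this HTLC
        // can't be sent yet and sits in the channel's holding cell (hence no monitor update on
        // nodes[1] below).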
4467         if forwarded_htlc {
4468                 check_added_monitors!(nodes[0], 1);
4469                 let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
4470                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
4471                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
4472                 expect_pending_htlcs_forwardable!(nodes[1]);
4473         }
4474         check_added_monitors!(nodes[1], 0);
4475
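        // Connect blocks until just shy of the timeout deadline, confirming the holding-cell HTLC
        // is not yet failed, then cross the deadline with one more block.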
4476         connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS);
4477         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4478         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
4479         connect_blocks(&nodes[1], 1);
4480
4481         if forwarded_htlc {
4482                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
4483                 check_added_monitors!(nodes[1], 1);
4484                 let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
4485                 assert_eq!(fail_commit.len(), 1);
4486                 match fail_commit[0] {
4487                         MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, ref commitment_signed, .. }, .. } => {
4488                                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
4489                                 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, true, true);
4490                         },
4491                         _ => unreachable!(),
4492                 }
4493                 expect_payment_failed_with_update!(nodes[0], second_payment_hash, false, chan_2.0.contents.short_channel_id, false);
4494         } else {
4495                 expect_payment_failed!(nodes[1], second_payment_hash, false);
4496         }
4497 }
4498
4499 #[test]
4500 fn test_holding_cell_htlc_add_timeouts() {
4501         do_test_holding_cell_htlc_add_timeouts(false);
4502         do_test_holding_cell_htlc_add_timeouts(true);
4503 }
4504
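// Drains $node's pending SpendableOutputs events and spends each returned descriptor to an
// OP_RETURN script via the backing keys manager, additionally attempting a single aggregated
// spend when more than one output was returned. Yields the resulting transactions for inspection.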
4505 macro_rules! check_spendable_outputs {
4506         ($node: expr, $keysinterface: expr) => {
4507                 {
4508                         let mut events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events();
4509                         let mut txn = Vec::new();
4510                         let mut all_outputs = Vec::new();
4511                         let secp_ctx = Secp256k1::new();
4512                         for event in events.drain(..) {
4513                                 match event {
4514                                         Event::SpendableOutputs { mut outputs, channel_id: _ } => {
4515                                                 for outp in outputs.drain(..) {
4516                                                         txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx).unwrap());
4517                                                         all_outputs.push(outp);
4518                                                 }
4519                                         },
4520                                         _ => panic!("Unexpected event"),
4521                                 };
4522                         }
4523                         if all_outputs.len() > 1 {
4524                                 if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs(&all_outputs.iter().map(|a| a).collect::<Vec<_>>(), Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx) {
4525                                         txn.push(tx);
4526                                 }
4527                         }
4528                         txn
4529                 }
4530         }
4531 }
4532
4533 #[test]
4534 fn test_claim_sizeable_push_msat() {
4535         // Incidentally tests SpendableOutputs event generation due to detection of the to_local output on the commitment tx
4536         let chanmon_cfgs = create_chanmon_cfgs(2);
4537         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4538         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4539         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4540
4541         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
4542         nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
4543         check_closed_broadcast!(nodes[1], true);
4544         check_added_monitors!(nodes[1], 1);
4545         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
4546         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4547         assert_eq!(node_txn.len(), 1);
4548         check_spends!(node_txn[0], chan.3);
4549         assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
4550
4551         mine_transaction(&nodes[1], &node_txn[0]);
4552         connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
4553
4554         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4555         assert_eq!(spend_txn.len(), 1);
4556         assert_eq!(spend_txn[0].input.len(), 1);
4557         check_spends!(spend_txn[0], node_txn[0]);
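        // The to_local output is encumbered by the counterparty-selected to_self_delay
        // (BREAKDOWN_TIMEOUT in tests), which the sweep must signal via its input's nSequence.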
4558         assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
4559 }
4560
4561 #[test]
4562 fn test_claim_on_remote_sizeable_push_msat() {
4563         // Same test as the previous one, but on the remote commitment tx, as per_commitment_point registration changes depending on whether you're the funder or fundee, and the
4564         // to_remote output is encumbered by a P2WPKH
4565         let chanmon_cfgs = create_chanmon_cfgs(2);
4566         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4567         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4568         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4569
4570         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
4571         nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
4572         check_closed_broadcast!(nodes[0], true);
4573         check_added_monitors!(nodes[0], 1);
4574         check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
4575
4576         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4577         assert_eq!(node_txn.len(), 1);
4578         check_spends!(node_txn[0], chan.3);
4579         assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
4580
4581         mine_transaction(&nodes[1], &node_txn[0]);
4582         check_closed_broadcast!(nodes[1], true);
4583         check_added_monitors!(nodes[1], 1);
4584         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4585         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4586
4587         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4588         assert_eq!(spend_txn.len(), 1);
4589         check_spends!(spend_txn[0], node_txn[0]);
4590 }
4591
4592 #[test]
4593 fn test_claim_on_remote_revoked_sizeable_push_msat() {
4594         // Same test as the previous one, but on the remote *revoked* commitment tx, as per_commitment_point registration changes depending on whether you're the funder or fundee, and the
4595         // to_remote output is encumbered by a P2WPKH
4596
4597         let chanmon_cfgs = create_chanmon_cfgs(2);
4598         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4599         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4600         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4601
4602         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000);
4603         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4604         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2);
4605         assert_eq!(revoked_local_txn[0].input.len(), 1);
4606         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
4607
4608         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4609         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4610         check_closed_broadcast!(nodes[1], true);
4611         check_added_monitors!(nodes[1], 1);
4612         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4613
4614         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4615         mine_transaction(&nodes[1], &node_txn[0]);
4616         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4617
4618         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4619         assert_eq!(spend_txn.len(), 3);
4620         check_spends!(spend_txn[0], revoked_local_txn[0]); // to_remote output on revoked remote commitment_tx
4621         check_spends!(spend_txn[1], node_txn[0]);
4622         check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[0]); // Both outputs
4623 }
4624
4625 #[test]
4626 fn test_static_spendable_outputs_preimage_tx() {
4627         let chanmon_cfgs = create_chanmon_cfgs(2);
4628         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4629         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4630         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4631
4632         // Create some initial channels
4633         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4634
4635         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
4636
4637         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4638         assert_eq!(commitment_tx[0].input.len(), 1);
4639         assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
4640
4641         // Settle A's commitment tx on B's chain
4642         nodes[1].node.claim_funds(payment_preimage);
4643         expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
4644         check_added_monitors!(nodes[1], 1);
4645         mine_transaction(&nodes[1], &commitment_tx[0]);
4646         check_added_monitors!(nodes[1], 1);
4647         let events = nodes[1].node.get_and_clear_pending_msg_events();
4648         match events[0] {
4649                 MessageSendEvent::UpdateHTLCs { .. } => {},
4650                 _ => panic!("Unexpected event"),
4651         }
4652         match events[2] {
4653                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4654                 _ => panic!("Unexpected event"),
4655         }
4656
4657         // Check B's monitor was able to send back output descriptor event for preimage tx on A's commitment tx
4658         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: preimage tx
4659         assert_eq!(node_txn.len(), 1);
4660         check_spends!(node_txn[0], commitment_tx[0]);
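        // The last witness element is the HTLC script itself; since A offered this HTLC, B's
        // preimage claim spends an offered-HTLC script.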
4661         assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
4662
4663         mine_transaction(&nodes[1], &node_txn[0]);
4664         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4665         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4666
4667         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4668         assert_eq!(spend_txn.len(), 1);
4669         check_spends!(spend_txn[0], node_txn[0]);
4670 }
4671
4672 #[test]
4673 fn test_static_spendable_outputs_timeout_tx() {
4674         let chanmon_cfgs = create_chanmon_cfgs(2);
4675         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4676         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4677         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4678
4679         // Create some initial channels
4680         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4681
4682         // Rebalance the network a bit by relaying one payment through all the channels ...
4683         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
4684
4685         let (_, our_payment_hash, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000);
4686
4687         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
4688         assert_eq!(commitment_tx[0].input.len(), 1);
4689         assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
4690
4691         // Settle A's commitment tx on B's chain
4692         mine_transaction(&nodes[1], &commitment_tx[0]);
4693         check_added_monitors!(nodes[1], 1);
4694         let events = nodes[1].node.get_and_clear_pending_msg_events();
4695         match events[1] {
4696                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4697                 _ => panic!("Unexpected event"),
4698         }
4699         connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
4700
4701         // Check B's monitor was able to send back output descriptor event for timeout tx on A's commitment tx
4702         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4703         assert_eq!(node_txn.len(), 1); // ChannelMonitor: timeout tx
4704         check_spends!(node_txn[0], commitment_tx[0].clone());
4705         assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4706
4707         mine_transaction(&nodes[1], &node_txn[0]);
4708         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4709         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4710         expect_payment_failed!(nodes[1], our_payment_hash, false);
4711
4712         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4713         assert_eq!(spend_txn.len(), 3); // SpendableOutputs: remote_commitment_tx.to_remote, timeout_tx.output, and one aggregated spend of both
4714         check_spends!(spend_txn[0], commitment_tx[0]);
4715         check_spends!(spend_txn[1], node_txn[0]);
4716         check_spends!(spend_txn[2], node_txn[0], commitment_tx[0]); // All outputs
4717 }
4718
4719 #[test]
4720 fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
4721         let chanmon_cfgs = create_chanmon_cfgs(2);
4722         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4723         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4724         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4725
4726         // Create some initial channels
4727         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4728
4729         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4730         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
4731         assert_eq!(revoked_local_txn[0].input.len(), 1);
4732         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4733
4734         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4735
4736         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4737         check_closed_broadcast!(nodes[1], true);
4738         check_added_monitors!(nodes[1], 1);
4739         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4740
4741         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4742         assert_eq!(node_txn.len(), 1);
4743         assert_eq!(node_txn[0].input.len(), 2);
4744         check_spends!(node_txn[0], revoked_local_txn[0]);
4745
4746         mine_transaction(&nodes[1], &node_txn[0]);
4747         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4748
4749         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4750         assert_eq!(spend_txn.len(), 1);
4751         check_spends!(spend_txn[0], node_txn[0]);
4752 }
4753
4754 #[test]
4755 fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
4756         let mut chanmon_cfgs = create_chanmon_cfgs(2);
4757         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
4758         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4759         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4760         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4761
4762         // Create some initial channels
4763         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4764
4765         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4766         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
4767         assert_eq!(revoked_local_txn[0].input.len(), 1);
4768         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4769
4770         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4771
4772         // A will generate HTLC-Timeout from revoked commitment tx
4773         mine_transaction(&nodes[0], &revoked_local_txn[0]);
4774         check_closed_broadcast!(nodes[0], true);
4775         check_added_monitors!(nodes[0], 1);
4776         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4777         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
4778
4779         let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4780         assert_eq!(revoked_htlc_txn.len(), 1);
4781         assert_eq!(revoked_htlc_txn[0].input.len(), 1);
4782         assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
4783         check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
4784         assert_ne!(revoked_htlc_txn[0].lock_time, LockTime::ZERO); // HTLC-Timeout
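        // (Per BOLT 3, HTLC-Timeout txs set nLockTime to the HTLC's cltv_expiry while HTLC-Success
        // txs use a locktime of 0, which is how we distinguish the two here.)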
4785
4786         // B will generate justice tx from A's revoked commitment/HTLC tx
4787         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
4788         check_closed_broadcast!(nodes[1], true);
4789         check_added_monitors!(nodes[1], 1);
4790         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4791
4792         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4793         assert_eq!(node_txn.len(), 2); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs
4794         // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
4795         // including the one already spent by revoked_htlc_txn[0]. That's OK, we'll spend with valid
4796         // transactions next...
4797         assert_eq!(node_txn[0].input.len(), 3);
4798         check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);
4799
4800         assert_eq!(node_txn[1].input.len(), 2);
4801         check_spends!(node_txn[1], revoked_local_txn[0], revoked_htlc_txn[0]);
4802         if node_txn[1].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
4803                 assert_ne!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4804         } else {
4805                 assert_eq!(node_txn[1].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
4806                 assert_ne!(node_txn[1].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4807         }
4808
4809         mine_transaction(&nodes[1], &node_txn[1]);
4810         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4811
4812         // Check B's ChannelMonitor was able to generate the right spendable output descriptor
4813         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4814         assert_eq!(spend_txn.len(), 1);
4815         assert_eq!(spend_txn[0].input.len(), 1);
4816         check_spends!(spend_txn[0], node_txn[1]);
4817 }
4818
4819 #[test]
4820 fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
4821         let mut chanmon_cfgs = create_chanmon_cfgs(2);
4822         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
4823         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4824         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4825         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4826
4827         // Create some initial channels
4828         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4829
4830         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4831         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
4832         assert_eq!(revoked_local_txn[0].input.len(), 1);
4833         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4834
4835         // The to-be-revoked commitment tx should have one HTLC and one to_remote output
4836         assert_eq!(revoked_local_txn[0].output.len(), 2);
4837
4838         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4839
4840         // B will generate HTLC-Success from revoked commitment tx
4841         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4842         check_closed_broadcast!(nodes[1], true);
4843         check_added_monitors!(nodes[1], 1);
4844         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
4845         let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4846
4847         assert_eq!(revoked_htlc_txn.len(), 1);
4848         assert_eq!(revoked_htlc_txn[0].input.len(), 1);
4849         assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4850         check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
4851
4852         // Check that the unspent one of the two outputs on revoked_local_txn[0] is a P2WPKH:
4853         let unspent_local_txn_output = revoked_htlc_txn[0].input[0].previous_output.vout as usize ^ 1;
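        // (with exactly two outputs, XOR-ing the spent vout with 1 gives the other output's index)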
4854         assert_eq!(revoked_local_txn[0].output[unspent_local_txn_output].script_pubkey.len(), 2 + 20); // P2WPKH
4855
4856         // A will generate justice tx from B's revoked commitment/HTLC tx
4857         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
4858         check_closed_broadcast!(nodes[0], true);
4859         check_added_monitors!(nodes[0], 1);
4860         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4861
4862         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
4863         assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success
4864
4865         // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
4866         // including the one already spent by revoked_htlc_txn[0]. That's OK, we'll spend with valid
4867         // transactions next...
4868         assert_eq!(node_txn[0].input.len(), 2);
4869         check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);
4870         if node_txn[0].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
4871                 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4872         } else {
4873                 assert_eq!(node_txn[0].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
4874                 assert_eq!(node_txn[0].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4875         }
4876
4877         assert_eq!(node_txn[1].input.len(), 1);
4878         check_spends!(node_txn[1], revoked_htlc_txn[0]);
4879
4880         mine_transaction(&nodes[0], &node_txn[1]);
4881         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
4882
4883         // Note that nodes[0]'s tx_broadcaster is still locked, so if we get here the channelmonitor
4884         // didn't try to generate any new transactions.
4885
4886         // Check A's ChannelMonitor was able to generate the right spendable output descriptor
4887         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
4888         assert_eq!(spend_txn.len(), 3);
4889         assert_eq!(spend_txn[0].input.len(), 1);
4890         check_spends!(spend_txn[0], revoked_local_txn[0]); // spending to_remote output from revoked local tx
4891         assert_ne!(spend_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
4892         check_spends!(spend_txn[1], node_txn[1]); // spending justice tx output on the htlc success tx
4893         check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[1]); // Both outputs
4894 }
4895
4896 #[test]
4897 fn test_onchain_to_onchain_claim() {
4898         // Test that, in case of channel closure, we detect the state of the HTLC output and claim
4899         // the HTLC on the downstream peer's remote commitment tx.
4900         // First, have C claim an HTLC against its own latest commitment transaction.
4901         // Then, broadcast these to B, which should update the monitor downstream on the A<->B
4902         // channel.
4903         // Finally, check that B will claim the HTLC output if A's latest commitment transaction
4904         // gets broadcast.
4905
4906         let chanmon_cfgs = create_chanmon_cfgs(3);
4907         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
4908         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
4909         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
4910
4911         // Create some initial channels
4912         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4913         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4914
4915         // Ensure all nodes are at the same height
4916         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
4917         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
4918         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
4919         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
4920
4921         // Rebalance the network a bit by relaying one payment through all the channels ...
4922         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
4923         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
4924
4925         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
4926         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
4927         check_spends!(commitment_tx[0], chan_2.3);
4928         nodes[2].node.claim_funds(payment_preimage);
4929         expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
4930         check_added_monitors!(nodes[2], 1);
4931         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
4932         assert!(updates.update_add_htlcs.is_empty());
4933         assert!(updates.update_fail_htlcs.is_empty());
4934         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
4935         assert!(updates.update_fail_malformed_htlcs.is_empty());
4936
4937         mine_transaction(&nodes[2], &commitment_tx[0]);
4938         check_closed_broadcast!(nodes[2], true);
4939         check_added_monitors!(nodes[2], 1);
4940         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
4941
4942         let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx)
4943         assert_eq!(c_txn.len(), 1);
4944         check_spends!(c_txn[0], commitment_tx[0]);
4945         assert_eq!(c_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
4946         assert!(c_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
4947         assert_eq!(c_txn[0].lock_time, LockTime::ZERO); // Success tx
4948
4949         // Now that we've broadcast C's commitment tx and HTLC-Success tx on B's chain, we should be able to extract the preimage and update the downstream monitor
4950         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), c_txn[0].clone()]));
4951         check_added_monitors!(nodes[1], 1);
4952         let events = nodes[1].node.get_and_clear_pending_events();
4953         assert_eq!(events.len(), 2);
4954         match events[0] {
4955                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
4956                 _ => panic!("Unexpected event"),
4957         }
4958         match events[1] {
4959                 Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
4960                         next_channel_id, outbound_amount_forwarded_msat, ..
4961                 } => {
4962                         assert_eq!(total_fee_earned_msat, Some(1000));
4963                         assert_eq!(prev_channel_id, Some(chan_1.2));
4964                         assert_eq!(claim_from_onchain_tx, true);
4965                         assert_eq!(next_channel_id, Some(chan_2.2));
4966                         assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
4967                 },
4968                 _ => panic!("Unexpected event"),
4969         }
4970         check_added_monitors!(nodes[1], 1);
4971         let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events();
4972         assert_eq!(msg_events.len(), 3);
4973         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut msg_events);
4974         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut msg_events);
4975
4976         match nodes_2_event {
4977                 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {},
4978                 _ => panic!("Unexpected event"),
4979         }
4980
4981         match nodes_0_event {
4982                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
4983                         assert!(update_add_htlcs.is_empty());
4984                         assert!(update_fail_htlcs.is_empty());
4985                         assert_eq!(update_fulfill_htlcs.len(), 1);
4986                         assert!(update_fail_malformed_htlcs.is_empty());
4987                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
4988                 },
4989                 _ => panic!("Unexpected event"),
4990         };
4991
4992         // Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
4993         match msg_events[0] {
4994                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
4995                 _ => panic!("Unexpected event"),
4996         }
4997
4998         // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
4999         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
5000         mine_transaction(&nodes[1], &commitment_tx[0]);
5001         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
5002         let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
5003         // ChannelMonitor: HTLC-Success tx
5004         assert_eq!(b_txn.len(), 1);
5005         check_spends!(b_txn[0], commitment_tx[0]);
5006         assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5007         assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
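        // The claim's nLockTime is presumably set to the current height as an anti-fee-sniping
        // measure; contrast the locktime-0 HTLC-Success tx C broadcast on its own commitment above.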
5008         assert_eq!(b_txn[0].lock_time.to_consensus_u32(), nodes[1].best_block_info().1); // Success tx
5009
5010         check_closed_broadcast!(nodes[1], true);
5011         check_added_monitors!(nodes[1], 1);
5012 }
5013
5014 #[test]
5015 fn test_duplicate_payment_hash_one_failure_one_success() {
5016         // Topology : A --> B --> C --> D
5017         // We route 2 payments with the same hash between B and C; one will time out, the other will be claimed successfully.
5018         // Note that because C will refuse to generate two payment secrets for the same payment hash,
5019         // we forward one of the payments onwards to D.
5020         let chanmon_cfgs = create_chanmon_cfgs(4);
5021         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
5022         // When this test was written, the default base fee floated based on the HTLC count.
5023         // It is now fixed, so we simply set the fee to the expected value here.
5024         let mut config = test_default_channel_config();
5025         config.channel_config.forwarding_fee_base_msat = 196;
5026         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs,
5027                 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
5028         let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
5029
5030         create_announced_chan_between_nodes(&nodes, 0, 1);
5031         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
5032         create_announced_chan_between_nodes(&nodes, 2, 3);
5033
5034         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
5035         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
5036         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
5037         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
5038         connect_blocks(&nodes[3], node_max_height - nodes[3].best_block_info().1);
5039
5040         let (our_payment_preimage, duplicate_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000);
5041
5042         let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, None).unwrap();
5043         // We reduce the final CLTV here by a somewhat arbitrary constant to keep it under the one-byte
5044         // script push size limit so that the below script length checks match
5045         // ACCEPTED_HTLC_SCRIPT_WEIGHT.
5046         let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV - 40)
5047                 .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap();
5048         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, 800_000);
5049         send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[2], &nodes[3]]], 800_000, duplicate_payment_hash, payment_secret);
5050
5051         let commitment_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
5052         assert_eq!(commitment_txn[0].input.len(), 1);
5053         check_spends!(commitment_txn[0], chan_2.3);
5054
5055         mine_transaction(&nodes[1], &commitment_txn[0]);
5056         check_closed_broadcast!(nodes[1], true);
5057         check_added_monitors!(nodes[1], 1);
5058         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
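        // As the forwarding hop, B's incoming HTLC expires MIN_CLTV_EXPIRY_DELTA blocks after the
        // reduced final CLTV (TEST_FINAL_CLTV - 40) used when building the route above.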
5059         connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
5060
5061         let htlc_timeout_tx;
5062         { // Extract one of the two HTLC-Timeout transactions
5063                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5064                 // ChannelMonitor: timeout tx * 2-or-3
5065                 assert!(node_txn.len() == 2 || node_txn.len() == 3);
5066
5067                 check_spends!(node_txn[0], commitment_txn[0]);
5068                 assert_eq!(node_txn[0].input.len(), 1);
5069                 assert_eq!(node_txn[0].output.len(), 1);
5070
5071                 if node_txn.len() > 2 {
5072                         check_spends!(node_txn[1], commitment_txn[0]);
5073                         assert_eq!(node_txn[1].input.len(), 1);
5074                         assert_eq!(node_txn[1].output.len(), 1);
5075                         assert_eq!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
5076
5077                         check_spends!(node_txn[2], commitment_txn[0]);
5078                         assert_eq!(node_txn[2].input.len(), 1);
5079                         assert_eq!(node_txn[2].output.len(), 1);
5080                         assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
5081                 } else {
5082                         check_spends!(node_txn[1], commitment_txn[0]);
5083                         assert_eq!(node_txn[1].input.len(), 1);
5084                         assert_eq!(node_txn[1].output.len(), 1);
5085                         assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
5086                 }
5087
5088                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5089                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5090                 // Assign htlc_timeout_tx to the forwarded HTLC (with value ~800 sats). The received HTLC
5091                 // (with value 900 sats) will be claimed in the below `claim_funds` call.
5092                 if node_txn.len() > 2 {
5093                         assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5094                         htlc_timeout_tx = if node_txn[2].output[0].value < 900 { node_txn[2].clone() } else { node_txn[0].clone() };
5095                 } else {
5096                         htlc_timeout_tx = if node_txn[0].output[0].value < 900 { node_txn[1].clone() } else { node_txn[0].clone() };
5097                 }
5098         }
5099
5100         nodes[2].node.claim_funds(our_payment_preimage);
5101         expect_payment_claimed!(nodes[2], duplicate_payment_hash, 900_000);
5102
5103         mine_transaction(&nodes[2], &commitment_txn[0]);
5104         check_added_monitors!(nodes[2], 2);
5105         check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5106         let events = nodes[2].node.get_and_clear_pending_msg_events();
5107         match events[0] {
5108                 MessageSendEvent::UpdateHTLCs { .. } => {},
5109                 _ => panic!("Unexpected event"),
5110         }
5111         match events[2] {
5112                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5113                 _ => panic!("Unexpected event"),
5114         }
5115         let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
5116         assert_eq!(htlc_success_txn.len(), 2); // ChannelMonitor: HTLC-Success txn (*2 due to 2-HTLC outputs)
5117         check_spends!(htlc_success_txn[0], commitment_txn[0]);
5118         check_spends!(htlc_success_txn[1], commitment_txn[0]);
5119         assert_eq!(htlc_success_txn[0].input.len(), 1);
5120         assert_eq!(htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5121         assert_eq!(htlc_success_txn[1].input.len(), 1);
5122         assert_eq!(htlc_success_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5123         assert_ne!(htlc_success_txn[0].input[0].previous_output, htlc_success_txn[1].input[0].previous_output);
5124         assert_ne!(htlc_success_txn[1].input[0].previous_output, htlc_timeout_tx.input[0].previous_output);
5125
5126         mine_transaction(&nodes[1], &htlc_timeout_tx);
5127         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5128         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
5129         let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5130         assert!(htlc_updates.update_add_htlcs.is_empty());
5131         assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
5132         let first_htlc_id = htlc_updates.update_fail_htlcs[0].htlc_id;
5133         assert!(htlc_updates.update_fulfill_htlcs.is_empty());
5134         assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
5135         check_added_monitors!(nodes[1], 1);
5136
5137         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
5138         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
5139         {
5140                 commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true);
5141         }
5142         expect_payment_failed_with_update!(nodes[0], duplicate_payment_hash, false, chan_2.0.contents.short_channel_id, true);
5143
5144         // Settle the 2nd HTLC by broadcasting C's HTLC-Success tx on B's chain
5145         mine_transaction(&nodes[1], &htlc_success_txn[1]);
5146         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(196), true, true);
5147         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5148         assert!(updates.update_add_htlcs.is_empty());
5149         assert!(updates.update_fail_htlcs.is_empty());
5150         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
5151         assert_ne!(updates.update_fulfill_htlcs[0].htlc_id, first_htlc_id);
5152         assert!(updates.update_fail_malformed_htlcs.is_empty());
5153         check_added_monitors!(nodes[1], 1);
5154
5155         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
5156         commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false);
5157         expect_payment_sent(&nodes[0], our_payment_preimage, None, true, true);
5158 }
5159
5160 #[test]
5161 fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
5162         let chanmon_cfgs = create_chanmon_cfgs(2);
5163         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5164         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5165         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5166
5167         // Create some initial channels
5168         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5169
5170         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
5171         let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
5172         assert_eq!(local_txn.len(), 1);
5173         assert_eq!(local_txn[0].input.len(), 1);
5174         check_spends!(local_txn[0], chan_1.3);
5175
5176         // Give B knowledge of preimage to be able to generate a local HTLC-Success Tx
5177         nodes[1].node.claim_funds(payment_preimage);
5178         expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
5179         check_added_monitors!(nodes[1], 1);
5180
5181         mine_transaction(&nodes[1], &local_txn[0]);
5182         check_added_monitors!(nodes[1], 1);
5183         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
5184         let events = nodes[1].node.get_and_clear_pending_msg_events();
5185         match events[0] {
5186                 MessageSendEvent::UpdateHTLCs { .. } => {},
5187                 _ => panic!("Unexpected event"),
5188         }
5189         match events[2] {
5190                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5191                 _ => panic!("Unexpected event"),
5192         }
5193         let node_tx = {
5194                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5195                 assert_eq!(node_txn.len(), 1);
5196                 assert_eq!(node_txn[0].input.len(), 1);
5197                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5198                 check_spends!(node_txn[0], local_txn[0]);
5199                 node_txn[0].clone()
5200         };
5201
5202         mine_transaction(&nodes[1], &node_tx);
5203         connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
5204
5205         // Verify that B is able to spend its own HTLC-Success tx thanks to spendable output event given back by its ChannelMonitor
5206         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5207         assert_eq!(spend_txn.len(), 1);
5208         assert_eq!(spend_txn[0].input.len(), 1);
5209         check_spends!(spend_txn[0], node_tx);
5210         assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5211 }
5212
5213 fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, announce_latest: bool) {
5214         // Test that we fail backwards the full set of HTLCs we need to when remote broadcasts an
5215         // unrevoked commitment transaction.
5216         // This includes HTLCs which were below the dust threshold as well as HTLCs which were awaiting
5217         // a remote RAA before they could be failed backwards (and combinations thereof).
5218         // We also test duplicate-hash HTLCs by adding two nodes on each side of the target nodes which
5219         // use the same payment hashes.
5220         // Thus, we use a six-node network:
5221         //
5222         // A \         / E
5223         //    - C - D -
5224         // B /         \ F
5225         // And test where C fails back to A/B when D announces its latest commitment transaction
5226         let chanmon_cfgs = create_chanmon_cfgs(6);
5227         let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
5228         // When this test was written, the default base fee floated based on the HTLC count.
5229         // It is now fixed, so we simply set the fee to the expected value here.
5230         let mut config = test_default_channel_config();
5231         config.channel_config.forwarding_fee_base_msat = 196;
5232         let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs,
5233                 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
5234         let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
5235
5236         let _chan_0_2 = create_announced_chan_between_nodes(&nodes, 0, 2);
5237         let _chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
5238         let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
5239         let chan_3_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
5240         let chan_3_5  = create_announced_chan_between_nodes(&nodes, 3, 5);
5241
5242         // Rebalance and check output sanity...
5243         send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
5244         send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000);
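        // D's commitment tx on the C <-> D channel should have only to_local and to_remote
        // outputs, as every payment so far has been fully settled.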
5245         assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
5246
5247         let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
5248                 .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context().holder_dust_limit_satoshis;
5249         // 0th HTLC:
5250         let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5251         // 1st HTLC:
5252         let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5253         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
5254         // 2nd HTLC:
5255         send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5256         // 3rd HTLC:
5257         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5258         // 4th HTLC:
5259         let (_, payment_hash_3, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5260         // 5th HTLC:
5261         let (_, payment_hash_4, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5262         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
5263         // 6th HTLC:
5264         send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200, None).unwrap());
5265         // 7th HTLC:
5266         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200, None).unwrap());
5267
5268         // 8th HTLC:
5269         let (_, payment_hash_5, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5270         // 9th HTLC:
5271         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
5272         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
5273
5274         // 10th HTLC:
5275         let (_, payment_hash_6, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5276         // 11th HTLC:
5277         let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
5278         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200, None).unwrap());
5279
5280         // Double-check that six of the new HTLCs were added
5281         // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie,
5282         // with the to_local and to_remote outputs, the commitment tx has 8 outputs, the 6 dust HTLCs not being included).
5283         assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2).len(), 1);
5284         assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 8);
5285
5286         // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
5287         // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
5288         nodes[4].node.fail_htlc_backwards(&payment_hash_1);
5289         nodes[4].node.fail_htlc_backwards(&payment_hash_3);
5290         nodes[4].node.fail_htlc_backwards(&payment_hash_5);
5291         nodes[4].node.fail_htlc_backwards(&payment_hash_6);
5292         check_added_monitors!(nodes[4], 0);
5293
5294         let failed_destinations = vec![
5295                 HTLCDestination::FailedPayment { payment_hash: payment_hash_1 },
5296                 HTLCDestination::FailedPayment { payment_hash: payment_hash_3 },
5297                 HTLCDestination::FailedPayment { payment_hash: payment_hash_5 },
5298                 HTLCDestination::FailedPayment { payment_hash: payment_hash_6 },
5299         ];
5300         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations);
5301         check_added_monitors!(nodes[4], 1);
5302
5303         let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
5304         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[0]);
5305         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[1]);
5306         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[2]);
5307         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[3]);
5308         commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false);
5309
5310         // Fail 3rd below-dust and 7th above-dust HTLCs
5311         nodes[5].node.fail_htlc_backwards(&payment_hash_2);
5312         nodes[5].node.fail_htlc_backwards(&payment_hash_4);
5313         check_added_monitors!(nodes[5], 0);
5314
5315         let failed_destinations_2 = vec![
5316                 HTLCDestination::FailedPayment { payment_hash: payment_hash_2 },
5317                 HTLCDestination::FailedPayment { payment_hash: payment_hash_4 },
5318         ];
5319         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2);
5320         check_added_monitors!(nodes[5], 1);
5321
5322         let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
5323         nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[0]);
5324         nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]);
5325         commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
5326
5327         let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
5328
        // After the 4 and 2 removes above from nodes[4] and nodes[5] respectively, nodes[3] should
        // receive 6 HTLCHandlingFailed events
5330         let failed_destinations_3 = vec![
5331                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5332                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5333                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5334                 HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
5335                 HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
5336                 HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
5337         ];
5338         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3);
5339         check_added_monitors!(nodes[3], 1);
5340         let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
5341         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
5342         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[1]);
5343         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[2]);
5344         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[3]);
5345         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[4]);
5346         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[5]);
5347         if deliver_last_raa {
5348                 commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false);
5349         } else {
5350                 let _cs_last_raa = commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false, true, false, true);
5351         }
5352
5353         // D's latest commitment transaction now contains 1st + 2nd + 9th HTLCs (implicitly, they're
5354         // below the dust limit) and the 5th + 6th + 11th HTLCs. It has failed back the 0th, 3rd, 4th,
5355         // 7th, 8th, and 10th, but as we haven't yet delivered the final RAA to C, the fails haven't
5356         // propagated back to A/B yet (and D has two unrevoked commitment transactions).
5357         //
5358         // We now broadcast the latest commitment transaction, which *should* result in failures for
5359         // the 0th, 1st, 2nd, 3rd, 4th, 7th, 8th, 9th, and 10th HTLCs, ie all the below-dust HTLCs and
5360         // the non-broadcast above-dust HTLCs.
5361         //
5362         // Alternatively, we may broadcast the previous commitment transaction, which should only
5363         // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
5364         let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
5365
5366         if announce_latest {
5367                 mine_transaction(&nodes[2], &ds_last_commitment_tx[0]);
5368         } else {
5369                 mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
5370         }
        let events = nodes[2].node.get_and_clear_pending_events();
        assert_eq!(events.len(), if deliver_last_raa { 2 + 6 } else { 1 });
        let close_event = events.last().unwrap();
5379         match close_event {
5380                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
5381                 _ => panic!("Unexpected event"),
5382         }
5383
5384         connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
5385         check_closed_broadcast!(nodes[2], true);
5386         if deliver_last_raa {
5387                 expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true);
5388
5389                 let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
5390                 expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
5391         } else {
5392                 let expected_destinations: Vec<HTLCDestination> = if announce_latest {
5393                         repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect()
5394                 } else {
5395                         repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect()
5396                 };
5397
5398                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations);
5399         }
5400         check_added_monitors!(nodes[2], 3);
5401
5402         let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
5403         assert_eq!(cs_msgs.len(), 2);
5404         let mut a_done = false;
5405         for msg in cs_msgs {
5406                 match msg {
5407                         MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
5408                                 // Both under-dust HTLCs and the one above-dust HTLC that we had already failed
5409                                 // should be failed-backwards here.
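                                // Note: htlc_id is a per-channel counter assigned in the order the
                                // HTLCs were added on that channel, so the ids asserted below don't
                                // line up with the "0th".."10th" payment numbering used above.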
5410                                 let target = if *node_id == nodes[0].node.get_our_node_id() {
5411                                         // If announce_latest, expect 0th, 1st, 4th, 8th, 10th HTLCs, else only 0th, 1st, 10th below-dust HTLCs
5412                                         for htlc in &updates.update_fail_htlcs {
5413                                                 assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 6 || if announce_latest { htlc.htlc_id == 3 || htlc.htlc_id == 5 } else { false });
5414                                         }
5415                                         assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 5 } else { 3 });
5416                                         assert!(!a_done);
5417                                         a_done = true;
5418                                         &nodes[0]
5419                                 } else {
5420                                         // If announce_latest, expect 2nd, 3rd, 7th, 9th HTLCs, else only 2nd, 3rd, 9th below-dust HTLCs
5421                                         for htlc in &updates.update_fail_htlcs {
5422                                                 assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 5 || if announce_latest { htlc.htlc_id == 4 } else { false });
5423                                         }
5424                                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
5425                                         assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 4 } else { 3 });
5426                                         &nodes[1]
5427                                 };
5428                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
5429                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[1]);
5430                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[2]);
5431                                 if announce_latest {
5432                                         target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[3]);
5433                                         if *node_id == nodes[0].node.get_our_node_id() {
5434                                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[4]);
5435                                         }
5436                                 }
5437                                 commitment_signed_dance!(target, nodes[2], updates.commitment_signed, false, true);
5438                         },
5439                         _ => panic!("Unexpected event"),
5440                 }
5441         }
5442
5443         let as_events = nodes[0].node.get_and_clear_pending_events();
5444         assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 });
5445         let mut as_faileds = new_hash_set();
5446         let mut as_updates = 0;
5447         for event in as_events.iter() {
5448                 if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
5449                         assert!(as_faileds.insert(*payment_hash));
5450                         if *payment_hash != payment_hash_2 {
5451                                 assert_eq!(*payment_failed_permanently, deliver_last_raa);
5452                         } else {
5453                                 assert!(!payment_failed_permanently);
5454                         }
5455                         if let PathFailure::OnPath { network_update: Some(_) } = failure {
5456                                 as_updates += 1;
5457                         }
5458                 } else if let &Event::PaymentFailed { .. } = event {
5459                 } else { panic!("Unexpected event"); }
5460         }
5461         assert!(as_faileds.contains(&payment_hash_1));
5462         assert!(as_faileds.contains(&payment_hash_2));
5463         if announce_latest {
5464                 assert!(as_faileds.contains(&payment_hash_3));
5465                 assert!(as_faileds.contains(&payment_hash_5));
5466         }
5467         assert!(as_faileds.contains(&payment_hash_6));
5468
5469         let bs_events = nodes[1].node.get_and_clear_pending_events();
5470         assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 });
5471         let mut bs_faileds = new_hash_set();
5472         let mut bs_updates = 0;
5473         for event in bs_events.iter() {
5474                 if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
5475                         assert!(bs_faileds.insert(*payment_hash));
5476                         if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
5477                                 assert_eq!(*payment_failed_permanently, deliver_last_raa);
5478                         } else {
5479                                 assert!(!payment_failed_permanently);
5480                         }
5481                         if let PathFailure::OnPath { network_update: Some(_) } = failure {
5482                                 bs_updates += 1;
5483                         }
5484                 } else if let &Event::PaymentFailed { .. } = event {
5485                 } else { panic!("Unexpected event"); }
5486         }
5487         assert!(bs_faileds.contains(&payment_hash_1));
5488         assert!(bs_faileds.contains(&payment_hash_2));
5489         if announce_latest {
5490                 assert!(bs_faileds.contains(&payment_hash_4));
5491         }
5492         assert!(bs_faileds.contains(&payment_hash_5));
5493
        // For each HTLC which was not failed back by the normal process (ie when deliver_last_raa
        // is set), we should get a NetworkUpdate. A should have gotten 4 HTLCs which were failed
        // back due to unknown-preimage-etc (such recipient failures carry no NetworkUpdate), B
        // should have gotten 2. Thus, in the announce_latest && deliver_last_raa case, we should
        // have 5-4=1 and 4-2=2 NetworkUpdates.
5498         assert_eq!(as_updates, if deliver_last_raa { 1 } else if !announce_latest { 3 } else { 5 });
5499         assert_eq!(bs_updates, if deliver_last_raa { 2 } else if !announce_latest { 3 } else { 4 });
5500 }
5501
5502 #[test]
5503 fn test_fail_backwards_latest_remote_announce_a() {
5504         do_test_fail_backwards_unrevoked_remote_announce(false, true);
5505 }
5506
5507 #[test]
5508 fn test_fail_backwards_latest_remote_announce_b() {
5509         do_test_fail_backwards_unrevoked_remote_announce(true, true);
5510 }
5511
5512 #[test]
5513 fn test_fail_backwards_previous_remote_announce() {
5514         do_test_fail_backwards_unrevoked_remote_announce(false, false);
        // Note that (true, false) doesn't make sense as it implies we announce a revoked state,
        // which is tested for in test_commitment_revoked_fail_backward_exhaustive()
5517 }
5518
5519 #[test]
5520 fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
5521         let chanmon_cfgs = create_chanmon_cfgs(2);
5522         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5523         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5524         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5525
5526         // Create some initial channels
5527         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5528
        let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9000000);
5530         let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
5531         assert_eq!(local_txn[0].input.len(), 1);
5532         check_spends!(local_txn[0], chan_1.3);
5533
        // Time out the HTLC on A's chain so that A can generate an HTLC-Timeout tx
5535         mine_transaction(&nodes[0], &local_txn[0]);
5536         check_closed_broadcast!(nodes[0], true);
5537         check_added_monitors!(nodes[0], 1);
5538         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5539         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
5540
5541         let htlc_timeout = {
5542                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5543                 assert_eq!(node_txn.len(), 1);
5544                 assert_eq!(node_txn[0].input.len(), 1);
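                // The final witness element is the offered-HTLC script itself; its length
                // (OFFERED_HTLC_SCRIPT_WEIGHT) marks this as an HTLC-Timeout spend of our own
                // commitment tx rather than some other claim.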
5545                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5546                 check_spends!(node_txn[0], local_txn[0]);
5547                 node_txn[0].clone()
5548         };
5549
5550         mine_transaction(&nodes[0], &htlc_timeout);
5551         connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
5552         expect_payment_failed!(nodes[0], our_payment_hash, false);
5553
        // Verify that A is able to spend its own HTLC-Timeout tx via the SpendableOutputs event
        // its ChannelMonitor generated
5555         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5556         assert_eq!(spend_txn.len(), 3);
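        // spend_txn[0] sweeps the delayed to_self output of the commitment tx, spend_txn[1]
        // sweeps the HTLC-Timeout output, and spend_txn[2] aggregates both; the HTLC-Timeout
        // spends must wait out the BREAKDOWN_TIMEOUT to_self_delay, as asserted below.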
5557         check_spends!(spend_txn[0], local_txn[0]);
5558         assert_eq!(spend_txn[1].input.len(), 1);
5559         check_spends!(spend_txn[1], htlc_timeout);
5560         assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5561         assert_eq!(spend_txn[2].input.len(), 2);
5562         check_spends!(spend_txn[2], local_txn[0], htlc_timeout);
5563         assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
5564                 spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
5565 }
5566
5567 #[test]
5568 fn test_key_derivation_params() {
        // This test is a copy of test_dynamic_spendable_outputs_local_htlc_timeout_tx, with a key
        // manager rotation to test that the `channel_keys_id` returned in
        // [`SpendableOutputDescriptor::DelayedPaymentOutput`] lets us re-derive the channel key set
        // to then derive a `delayed_payment_key`.
5573
5574         let chanmon_cfgs = create_chanmon_cfgs(3);
5575
        // We manually create the node configuration to back up the seed.
5577         let seed = [42; 32];
5578         let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
5579         let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
5580         let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger));
5581         let scorer = RwLock::new(test_utils::TestScorer::new());
5582         let router = test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[0].logger, &scorer);
5583         let message_router = test_utils::TestMessageRouter::new(network_graph.clone(), &keys_manager);
5584         let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, message_router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
5585         let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5586         node_cfgs.remove(0);
5587         node_cfgs.insert(0, node);
5588
5589         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5590         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5591
5592         // Create some initial channels
5593         // Create a dummy channel to advance index by one and thus test re-derivation correctness
5594         // for node 0
5595         let chan_0 = create_announced_chan_between_nodes(&nodes, 0, 2);
5596         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5597         assert_ne!(chan_0.3.output[0].script_pubkey, chan_1.3.output[0].script_pubkey);
5598
5599         // Ensure all nodes are at the same height
5600         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
5601         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
5602         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
5603         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
5604
        let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9000000);
5606         let local_txn_0 = get_local_commitment_txn!(nodes[0], chan_0.2);
5607         let local_txn_1 = get_local_commitment_txn!(nodes[0], chan_1.2);
5608         assert_eq!(local_txn_1[0].input.len(), 1);
5609         check_spends!(local_txn_1[0], chan_1.3);
5610
        // Check that the funding pubkeys are unique
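        // The last witness element of a funding spend is the 2-of-2 multisig witnessScript:
        // OP_2 <33-byte key> <33-byte key> OP_2 OP_CHECKMULTISIG, putting the keys at byte
        // ranges [2..35] and [36..69] (each preceded by a one-byte push opcode).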
5612         let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][36..69]));
5613         let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][36..69]));
5614         if from_0_funding_key_0 == from_1_funding_key_0
5615             || from_0_funding_key_0 == from_1_funding_key_1
5616             || from_0_funding_key_1 == from_1_funding_key_0
5617             || from_0_funding_key_1 == from_1_funding_key_1 {
5618                 panic!("Funding pubkeys aren't unique");
5619         }
5620
        // Time out the HTLC on A's chain so that A can generate an HTLC-Timeout tx
5622         mine_transaction(&nodes[0], &local_txn_1[0]);
5623         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
5624         check_closed_broadcast!(nodes[0], true);
5625         check_added_monitors!(nodes[0], 1);
5626         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
5627
5628         let htlc_timeout = {
5629                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5630                 assert_eq!(node_txn.len(), 1);
5631                 assert_eq!(node_txn[0].input.len(), 1);
5632                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5633                 check_spends!(node_txn[0], local_txn_1[0]);
5634                 node_txn[0].clone()
5635         };
5636
5637         mine_transaction(&nodes[0], &htlc_timeout);
5638         connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
5639         expect_payment_failed!(nodes[0], our_payment_hash, false);
5640
        // Verify that A is able to spend its own HTLC-Timeout tx via the SpendableOutputs event
        // its ChannelMonitor generated
5642         let new_keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
5643         let spend_txn = check_spendable_outputs!(nodes[0], new_keys_manager);
5644         assert_eq!(spend_txn.len(), 3);
5645         check_spends!(spend_txn[0], local_txn_1[0]);
5646         assert_eq!(spend_txn[1].input.len(), 1);
5647         check_spends!(spend_txn[1], htlc_timeout);
5648         assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
5649         assert_eq!(spend_txn[2].input.len(), 2);
5650         check_spends!(spend_txn[2], local_txn_1[0], htlc_timeout);
5651         assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
5652                 spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
5653 }
5654
5655 #[test]
5656 fn test_static_output_closing_tx() {
5657         let chanmon_cfgs = create_chanmon_cfgs(2);
5658         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5659         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5660         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5661
5662         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5663
        send_payment(&nodes[0], &[&nodes[1]], 8000000);
5665         let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
5666
5667         mine_transaction(&nodes[0], &closing_tx);
5668         check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
5669         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
5670
5671         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5672         assert_eq!(spend_txn.len(), 1);
5673         check_spends!(spend_txn[0], closing_tx);
5674
5675         mine_transaction(&nodes[1], &closing_tx);
5676         check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
5677         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5678
5679         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5680         assert_eq!(spend_txn.len(), 1);
5681         check_spends!(spend_txn[0], closing_tx);
5682 }
5683
5684 fn do_htlc_claim_local_commitment_only(use_dust: bool) {
5685         let chanmon_cfgs = create_chanmon_cfgs(2);
5686         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5687         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5688         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5689         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5690
5691         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 });
5692
5693         // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being
5694         // present in B's local commitment transaction, but none of A's commitment transactions.
5695         nodes[1].node.claim_funds(payment_preimage);
5696         check_added_monitors!(nodes[1], 1);
5697         expect_payment_claimed!(nodes[1], payment_hash, if use_dust { 50000 } else { 3_000_000 });
5698
5699         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5700         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
5701         expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
5702
5703         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
5704         check_added_monitors!(nodes[0], 1);
5705         let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5706         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
5707         check_added_monitors!(nodes[1], 1);
5708
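        // Connect blocks until the HTLC expiry is within CLTV_CLAIM_BUFFER of the chain tip, at
        // which point B's ChannelMonitor should go on-chain, broadcasting its commitment tx (and,
        // in the non-dust case, an HTLC-Success tx), as checked via test_txn_broadcast below.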
5709         let starting_block = nodes[1].best_block_info();
5710         let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
5711         for _ in starting_block.1 + 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + starting_block.1 + 2 {
5712                 connect_block(&nodes[1], &block);
5713                 block.header.prev_blockhash = block.block_hash();
5714         }
5715         test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
5716         check_closed_broadcast!(nodes[1], true);
5717         check_added_monitors!(nodes[1], 1);
5718         check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 100000);
5719 }
5720
5721 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
5722         let chanmon_cfgs = create_chanmon_cfgs(2);
5723         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5724         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5725         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5726         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5727
5728         let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if use_dust { 50000 } else { 3000000 });
5729         nodes[0].node.send_payment_with_route(&route, payment_hash,
5730                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
5731         check_added_monitors!(nodes[0], 1);
5732
5733         let _as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5734
5735         // As far as A is concerned, the HTLC is now present only in the latest remote commitment
5736         // transaction, however it is not in A's latest local commitment, so we can just broadcast that
5737         // to "time out" the HTLC.
5738
5739         let starting_block = nodes[1].best_block_info();
5740         let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
5741
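        // An outbound HTLC present only in the counterparty's commitment is given
        // LATENCY_GRACE_PERIOD_BLOCKS past its CLTV expiry to be resolved off-chain before we
        // force-close.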
5742         for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 {
5743                 connect_block(&nodes[0], &block);
5744                 block.header.prev_blockhash = block.block_hash();
5745         }
5746         test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
5747         check_closed_broadcast!(nodes[0], true);
5748         check_added_monitors!(nodes[0], 1);
5749         check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
5750 }
5751
5752 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
5753         let chanmon_cfgs = create_chanmon_cfgs(3);
5754         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5755         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5756         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5757         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
5758
5759         // Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present
5760         // in B's previous (unrevoked) commitment transaction, but none of A's commitment transactions.
5761         // Also optionally test that we *don't* fail the channel in case the commitment transaction was
5762         // actually revoked.
5763         let htlc_value = if use_dust { 50000 } else { 3000000 };
5764         let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
5765         nodes[1].node.fail_htlc_backwards(&our_payment_hash);
5766         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
5767         check_added_monitors!(nodes[1], 1);
5768
5769         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5770         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
5771         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
5772         check_added_monitors!(nodes[0], 1);
5773         let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5774         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
5775         check_added_monitors!(nodes[1], 1);
5776         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.1);
5777         check_added_monitors!(nodes[1], 1);
5778         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
5779
5780         if check_revoke_no_close {
5781                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
5782                 check_added_monitors!(nodes[0], 1);
5783         }
5784
5785         let starting_block = nodes[1].best_block_info();
5786         let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
5787         for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 {
5788                 connect_block(&nodes[0], &block);
5789                 block.header.prev_blockhash = block.block_hash();
5790         }
5791         if !check_revoke_no_close {
5792                 test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
5793                 check_closed_broadcast!(nodes[0], true);
5794                 check_added_monitors!(nodes[0], 1);
5795                 check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
5796         } else {
5797                 expect_payment_failed!(nodes[0], our_payment_hash, true);
5798         }
5799 }
5800
// Test that we close channels on-chain when broadcastable HTLCs reach their timeout window.
// There are only a few cases to test here:
//  * it's not really normative behavior, but we test that below-dust HTLCs "included" in
//    broadcastable commitment transactions result in channel closure,
//  * the HTLC is included in an unrevoked-but-previous remote commitment transaction,
//  * the HTLC is included in the latest remote or local commitment transactions.
// We test each of the three possible commitment transactions individually and use both dust and
// non-dust HTLCs.
// Note that we don't bother testing both outbound and inbound HTLC failures for each case, and we
// assume they are handled the same across all six cases, as both outbound and inbound failures are
// tested for at least one of the cases in other tests.
5812 #[test]
5813 fn htlc_claim_single_commitment_only_a() {
5814         do_htlc_claim_local_commitment_only(true);
5815         do_htlc_claim_local_commitment_only(false);
5816
5817         do_htlc_claim_current_remote_commitment_only(true);
5818         do_htlc_claim_current_remote_commitment_only(false);
5819 }
5820
5821 #[test]
5822 fn htlc_claim_single_commitment_only_b() {
5823         do_htlc_claim_previous_remote_commitment_only(true, false);
5824         do_htlc_claim_previous_remote_commitment_only(false, false);
5825         do_htlc_claim_previous_remote_commitment_only(true, true);
5826         do_htlc_claim_previous_remote_commitment_only(false, true);
5827 }
5828
5829 #[test]
5830 #[should_panic]
fn bolt2_open_channel_sending_node_checks_part1() { // This test needs to be on its own as we are catching a panic
5832         let chanmon_cfgs = create_chanmon_cfgs(2);
5833         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5834         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5835         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5836         // Force duplicate randomness for every get-random call
5837         for node in nodes.iter() {
5838                 *node.keys_manager.override_random_bytes.lock().unwrap() = Some([0; 32]);
5839         }
5840
5841         // BOLT #2 spec: Sending node must ensure temporary_channel_id is unique from any other channel ID with the same peer.
5842         let channel_value_satoshis=10000;
5843         let push_msat=10001;
5844         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
5845         let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5846         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
5847         get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
5848
5849         // Create a second channel with the same random values. This used to panic due to a colliding
5850         // channel_id, but now panics due to a colliding outbound SCID alias.
5851         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
5852 }
5853
5854 #[test]
5855 fn bolt2_open_channel_sending_node_checks_part2() {
5856         let chanmon_cfgs = create_chanmon_cfgs(2);
5857         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5858         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5859         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5860
        // BOLT #2 spec: Sending node must set funding_satoshis to less than 2^24 satoshis (absent
        // wumbo support). Note that Rust's `^` is XOR, not exponentiation: `2^24` is 26 sats,
        // which create_channel rejects for being below the 1000-sat minimum channel size.
        let channel_value_satoshis=2^24;
5863         let push_msat=10001;
5864         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
5865
5866         // BOLT #2 spec: Sending node must set push_msat to equal or less than 1000 * funding_satoshis
5867         let channel_value_satoshis=10000;
        // Test when push_msat is one msat more than 1000 * funding_satoshis (10_000_001 msat,
        // just above the maximum).
5869         let push_msat=1000*channel_value_satoshis+1;
5870         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
5871
        // BOLT #2 spec: Sending node must set channel_reserve_satoshis greater than or equal to dust_limit_satoshis
5873         let channel_value_satoshis=10000;
5874         let push_msat=10001;
        assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_ok()); // Create a valid channel
5876         let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
5877         assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.common_fields.dust_limit_satoshis);
5878
5879         // BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0
        // Only the least-significant bit of channel_flags is currently defined, so channel_flags can only take one of two values: 0 or 1
5881         assert!(node0_to_1_send_open_channel.common_fields.channel_flags<=1);
5882
5883         // BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver.
5884         assert!(BREAKDOWN_TIMEOUT>0);
5885         assert!(node0_to_1_send_open_channel.common_fields.to_self_delay==BREAKDOWN_TIMEOUT);
5886
5887         // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within.
5888         let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
5889         assert_eq!(node0_to_1_send_open_channel.common_fields.chain_hash, chain_hash);
5890
5891         // BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys.
5892         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.funding_pubkey.serialize()).is_ok());
5893         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.revocation_basepoint.serialize()).is_ok());
5894         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.htlc_basepoint.serialize()).is_ok());
5895         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.payment_basepoint.serialize()).is_ok());
5896         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.delayed_payment_basepoint.serialize()).is_ok());
5897 }
5898
5899 #[test]
5900 fn bolt2_open_channel_sane_dust_limit() {
5901         let chanmon_cfgs = create_chanmon_cfgs(2);
5902         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5903         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5904         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5905
5906         let channel_value_satoshis=1000000;
5907         let push_msat=10001;
5908         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
5909         let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
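        // 547 sats is one above the implementation's 546-sat ceiling on dust_limit_satoshis (546
        // being the largest standard-output dust threshold), so nodes[1] must reject the
        // open_channel, as the error message below confirms.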
5910         node0_to_1_send_open_channel.common_fields.dust_limit_satoshis = 547;
5911         node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;
5912
5913         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
5914         let events = nodes[1].node.get_and_clear_pending_msg_events();
5915         let err_msg = match events[0] {
5916                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
5917                         msg.clone()
5918                 },
5919                 _ => panic!("Unexpected event"),
5920         };
5921         assert_eq!(err_msg.data, "dust_limit_satoshis (547) is greater than the implementation limit (546)");
5922 }
5923
// Test that if we fail to send an HTLC that is being freed from the holding cell, and the HTLC
// originated from our node, its failure is surfaced to the user. We trigger the failure by
// increasing our fee while the HTLC sits in the holding cell, such that it is no longer
// affordable once freed.
5928 #[test]
5929 fn test_fail_holding_cell_htlc_upon_free() {
5930         let chanmon_cfgs = create_chanmon_cfgs(2);
5931         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5932         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5933         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5934         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
5935
5936         // First nodes[0] generates an update_fee, setting the channel's
5937         // pending_update_fee.
5938         {
5939                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
5940                 *feerate_lock += 20;
5941         }
5942         nodes[0].node.timer_tick_occurred();
5943         check_added_monitors!(nodes[0], 1);
5944
5945         let events = nodes[0].node.get_and_clear_pending_msg_events();
5946         assert_eq!(events.len(), 1);
5947         let (update_msg, commitment_signed) = match events[0] {
5948                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
5949                         (update_fee.as_ref(), commitment_signed)
5950                 },
5951                 _ => panic!("Unexpected event"),
5952         };
5953
5954         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
5955
5956         let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5957         let channel_reserve = chan_stat.channel_reserve_msat;
5958         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
5959         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
5960
        // The 2* and +1 HTLCs in the commit tx fee calculation account for the fee spike buffer.
5962         let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
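        // That is: A's full 5_000_000 msat balance (100_000 sats less the 95_000_000 msat pushed
        // to B at open), minus the reserve, minus twice the commitment fee computed as if one
        // extra (fee-spike-buffer) HTLC were present.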
5963         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
5964
5965         // Send a payment which passes reserve checks but gets stuck in the holding cell.
5966         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
5967                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
5968         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5969         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
5970
5971         // Flush the pending fee update.
5972         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
5973         let (as_revoke_and_ack, _) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5974         check_added_monitors!(nodes[1], 1);
5975         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
5976         check_added_monitors!(nodes[0], 1);
5977
        // Upon receipt of the RAA, there will be an attempt to resend the holding cell
        // HTLC, but now that the fee has been raised the payment will fail, causing
        // us to surface its failure to the user.
5981         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
5982         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
5983         nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1);
5984
5985         // Check that the payment failed to be sent out.
5986         let events = nodes[0].node.get_and_clear_pending_events();
5987         assert_eq!(events.len(), 2);
5988         match &events[0] {
5989                 &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
5990                         assert_eq!(PaymentId(our_payment_hash.0), *payment_id.as_ref().unwrap());
5991                         assert_eq!(our_payment_hash.clone(), *payment_hash);
5992                         assert_eq!(*payment_failed_permanently, false);
5993                         assert_eq!(*short_channel_id, Some(route.paths[0].hops[0].short_channel_id));
5994                 },
5995                 _ => panic!("Unexpected event"),
5996         }
5997         match &events[1] {
5998                 &Event::PaymentFailed { ref payment_hash, .. } => {
5999                         assert_eq!(our_payment_hash.clone(), *payment_hash);
6000                 },
6001                 _ => panic!("Unexpected event"),
6002         }
6003 }
6004
6005 // Test that if multiple HTLCs are released from the holding cell and one is
6006 // valid but the other is no longer valid upon release, the valid HTLC can be
6007 // successfully completed while the other one fails as expected.
6008 #[test]
6009 fn test_free_and_fail_holding_cell_htlcs() {
6010         let chanmon_cfgs = create_chanmon_cfgs(2);
6011         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6012         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6013         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6014         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6015
6016         // First nodes[0] generates an update_fee, setting the channel's
6017         // pending_update_fee.
6018         {
6019                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
6020                 *feerate_lock += 200;
6021         }
6022         nodes[0].node.timer_tick_occurred();
6023         check_added_monitors!(nodes[0], 1);
6024
6025         let events = nodes[0].node.get_and_clear_pending_msg_events();
6026         assert_eq!(events.len(), 1);
6027         let (update_msg, commitment_signed) = match events[0] {
6028                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
6029                         (update_fee.as_ref(), commitment_signed)
6030                 },
6031                 _ => panic!("Unexpected event"),
6032         };
6033
6034         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
6035
6036         let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6037         let channel_reserve = chan_stat.channel_reserve_msat;
6038         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
6039         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
6040
        // The 2* and +1 HTLCs in the commit tx fee calculation account for the fee spike buffer.
6042         let amt_1 = 20000;
6043         let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features) - amt_1;
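        // amt_2 consumes everything remaining after the reserve, amt_1, and the commitment fee
        // for 2 pending HTLCs plus the fee-spike-buffer HTLC, so any fee increase renders it
        // unaffordable while amt_1 stays payable.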
6044         let (route_1, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_1);
6045         let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_2);
6046
6047         // Send 2 payments which pass reserve checks but get stuck in the holding cell.
6048         nodes[0].node.send_payment_with_route(&route_1, payment_hash_1,
6049                 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
6050         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6051         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1);
6052         let payment_id_2 = PaymentId(nodes[0].keys_manager.get_secure_random_bytes());
6053         nodes[0].node.send_payment_with_route(&route_2, payment_hash_2,
6054                 RecipientOnionFields::secret_only(payment_secret_2), payment_id_2).unwrap();
6055         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6056         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2);
6057
6058         // Flush the pending fee update.
6059         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
6060         let (revoke_and_ack, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6061         check_added_monitors!(nodes[1], 1);
6062         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_and_ack);
6063         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
6064         check_added_monitors!(nodes[0], 2);
6065
        // Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs,
        // but now that the fee has been raised the second payment will fail, causing us
        // to surface its failure to the user. The first payment should succeed.
6069         chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6070         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
6071         nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1);
6072
6073         // Check that the second payment failed to be sent out.
6074         let events = nodes[0].node.get_and_clear_pending_events();
6075         assert_eq!(events.len(), 2);
6076         match &events[0] {
6077                 &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
6078                         assert_eq!(payment_id_2, *payment_id.as_ref().unwrap());
6079                         assert_eq!(payment_hash_2.clone(), *payment_hash);
6080                         assert_eq!(*payment_failed_permanently, false);
6081                         assert_eq!(*short_channel_id, Some(route_2.paths[0].hops[0].short_channel_id));
6082                 },
6083                 _ => panic!("Unexpected event"),
6084         }
6085         match &events[1] {
6086                 &Event::PaymentFailed { ref payment_hash, .. } => {
6087                         assert_eq!(payment_hash_2.clone(), *payment_hash);
6088                 },
6089                 _ => panic!("Unexpected event"),
6090         }
6091
6092         // Complete the first payment and the RAA from the fee update.
6093         let (payment_event, send_raa_event) = {
6094                 let mut msgs = nodes[0].node.get_and_clear_pending_msg_events();
6095                 assert_eq!(msgs.len(), 2);
6096                 (SendEvent::from_event(msgs.remove(0)), msgs.remove(0))
6097         };
6098         let raa = match send_raa_event {
6099                 MessageSendEvent::SendRevokeAndACK { msg, .. } => msg,
6100                 _ => panic!("Unexpected event"),
6101         };
6102         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
6103         check_added_monitors!(nodes[1], 1);
6104         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6105         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6106         let events = nodes[1].node.get_and_clear_pending_events();
6107         assert_eq!(events.len(), 1);
6108         match events[0] {
6109                 Event::PendingHTLCsForwardable { .. } => {},
6110                 _ => panic!("Unexpected event"),
6111         }
6112         nodes[1].node.process_pending_htlc_forwards();
6113         let events = nodes[1].node.get_and_clear_pending_events();
6114         assert_eq!(events.len(), 1);
6115         match events[0] {
6116                 Event::PaymentClaimable { .. } => {},
6117                 _ => panic!("Unexpected event"),
6118         }
6119         nodes[1].node.claim_funds(payment_preimage_1);
6120         check_added_monitors!(nodes[1], 1);
6121         expect_payment_claimed!(nodes[1], payment_hash_1, amt_1);
6122
6123         let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6124         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]);
6125         commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true);
6126         expect_payment_sent!(nodes[0], payment_preimage_1);
6127 }
6128
// Test that if we fail to forward an HTLC that is being freed from the holding cell, the HTLC
// is failed backwards. We trigger the failure to forward the freed HTLC by increasing our fee
// while the HTLC is in the holding cell, such that it is no longer affordable once freed.
6133 #[test]
6134 fn test_fail_holding_cell_htlc_upon_free_multihop() {
6135         let chanmon_cfgs = create_chanmon_cfgs(3);
6136         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6137         // Avoid having to include routing fees in calculations
6138         let mut config = test_default_channel_config();
6139         config.channel_config.forwarding_fee_base_msat = 0;
6140         config.channel_config.forwarding_fee_proportional_millionths = 0;
6141         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
6142         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6143         let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6144         let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);
6145
6146         // First nodes[1] generates an update_fee, setting the channel's
6147         // pending_update_fee.
6148         {
6149                 let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
6150                 *feerate_lock += 20;
6151         }
6152         nodes[1].node.timer_tick_occurred();
6153         check_added_monitors!(nodes[1], 1);
6154
6155         let events = nodes[1].node.get_and_clear_pending_msg_events();
6156         assert_eq!(events.len(), 1);
6157         let (update_msg, commitment_signed) = match events[0] {
6158                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
6159                         (update_fee.as_ref(), commitment_signed)
6160                 },
6161                 _ => panic!("Unexpected event"),
6162         };
6163
6164         nodes[2].node.handle_update_fee(&nodes[1].node.get_our_node_id(), update_msg.unwrap());
6165
6166         let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan_0_1.2);
6167         let channel_reserve = chan_stat.channel_reserve_msat;
6168         let feerate = get_feerate!(nodes[0], nodes[1], chan_0_1.2);
6169         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_0_1.2);
6170
6171         // Send a payment which passes reserve checks but gets stuck in the holding cell.
6172         let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
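        // Note that chan_0_1 and chan_1_2 were opened with identical value/push_msat, so the
        // chan_0_1 stats above give chan_1_2's pre-fee-increase parameters; once nodes[1]'s
        // pending update_fee commits, the freed HTLC is no longer affordable on chan_1_2.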
6173         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send);
6174         let payment_event = {
6175                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6176                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6177                 check_added_monitors!(nodes[0], 1);
6178
6179                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6180                 assert_eq!(events.len(), 1);
6181
6182                 SendEvent::from_event(events.remove(0))
6183         };
6184         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6185         check_added_monitors!(nodes[1], 0);
6186         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6187         expect_pending_htlcs_forwardable!(nodes[1]);
6188
6189         chan_stat = get_channel_value_stat!(nodes[1], nodes[2], chan_1_2.2);
6190         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
6191
6192         // Flush the pending fee update.
6193         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
6194         let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
6195         check_added_monitors!(nodes[2], 1);
6196         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &raa);
6197         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &commitment_signed);
6198         check_added_monitors!(nodes[1], 2);
6199
6200         // A final RAA message is generated to finalize the fee update.
6201         let events = nodes[1].node.get_and_clear_pending_msg_events();
6202         assert_eq!(events.len(), 1);
6203
6204         let raa_msg = match &events[0] {
6205                 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => {
6206                         msg.clone()
6207                 },
6208                 _ => panic!("Unexpected event"),
6209         };
6210
6211         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_msg);
6212         check_added_monitors!(nodes[2], 1);
6213         assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
6214
6215         // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
6216         let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
6217         assert_eq!(process_htlc_forwards_event.len(), 2);
6218         match &process_htlc_forwards_event[1] {
6219                 &Event::PendingHTLCsForwardable { .. } => {},
6220                 _ => panic!("Unexpected event"),
6221         }
6222
6223         // In response, we call ChannelManager's process_pending_htlc_forwards
6224         nodes[1].node.process_pending_htlc_forwards();
6225         check_added_monitors!(nodes[1], 1);
6226
6227         // This causes the HTLC to be failed backwards, since at the increased feerate nodes[1] can no longer afford to forward it while maintaining its reserve.
6228         let fail_event = nodes[1].node.get_and_clear_pending_msg_events();
6229         assert_eq!(fail_event.len(), 1);
6230         let (fail_msg, commitment_signed) = match &fail_event[0] {
6231                 &MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
6232                         assert_eq!(updates.update_add_htlcs.len(), 0);
6233                         assert_eq!(updates.update_fulfill_htlcs.len(), 0);
6234                         assert_eq!(updates.update_fail_malformed_htlcs.len(), 0);
6235                         assert_eq!(updates.update_fail_htlcs.len(), 1);
6236                         (updates.update_fail_htlcs[0].clone(), updates.commitment_signed.clone())
6237                 },
6238                 _ => panic!("Unexpected event"),
6239         };
6240
6241         // Pass the failure messages back to nodes[0].
6242         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
6243         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
6244
6245         // Complete the HTLC failure+removal process.
6246         let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6247         check_added_monitors!(nodes[0], 1);
6248         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
6249         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
6250         check_added_monitors!(nodes[1], 2);
6251         let final_raa_event = nodes[1].node.get_and_clear_pending_msg_events();
6252         assert_eq!(final_raa_event.len(), 1);
6253         let raa = match &final_raa_event[0] {
6254                 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => msg.clone(),
6255                 _ => panic!("Unexpected event"),
6256         };
6257         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
6258         expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_1_2.0.contents.short_channel_id, false);
6259         check_added_monitors!(nodes[0], 1);
6260 }
6261
6262 #[test]
6263 fn test_payment_route_reaching_same_channel_twice() {
6264         //A route should not go through the same channel twice.
6265         //This is enforced when constructing a route.
6266         let chanmon_cfgs = create_chanmon_cfgs(2);
6267         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6268         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6269         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6270         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6271
6272         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0)
6273                 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
6274         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000);
6275
6276         // Extend the path by itself, essentially simulating route going through same channel twice
6277         let cloned_hops = route.paths[0].hops.clone();
6278         route.paths[0].hops.extend_from_slice(&cloned_hops);
6279
6280         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6281                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6282         ), false, APIError::InvalidRoute { ref err },
6283         assert_eq!(err, &"Path went through the same channel twice"));
6284 }
6285
6286 // BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message.
6287 // BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve.
6288 //TODO: This does not appear to be explicitly enforced when sending an HTLC, but as the fee aspect of the BOLT specs is in flux, we leave this as a TODO.
6289
6290 #[test]
6291 fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
6292         //BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these)
6293         let chanmon_cfgs = create_chanmon_cfgs(2);
6294         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6295         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6296         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6297         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6298
6299         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6300         route.paths[0].hops[0].fee_msat = 100;
6301
6302         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6303                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6304                 ), true, APIError::ChannelUnavailable { .. }, {});
6305         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6306 }
6307
6308 #[test]
6309 fn test_update_add_htlc_bolt2_sender_zero_value_msat() {
6310         //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6311         let chanmon_cfgs = create_chanmon_cfgs(2);
6312         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6313         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6314         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6315         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6316
6317         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6318         route.paths[0].hops[0].fee_msat = 0;
6319         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6320                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)),
6321                 true, APIError::ChannelUnavailable { ref err },
6322                 assert_eq!(err, "Cannot send 0-msat HTLC"));
6323
6324         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6325         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 1);
6326 }
6327
6328 #[test]
6329 fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
6330         //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6331         let chanmon_cfgs = create_chanmon_cfgs(2);
6332         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6333         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6334         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6335         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6336
6337         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6338         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6339                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6340         check_added_monitors!(nodes[0], 1);
6341         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6342         updates.update_add_htlcs[0].amount_msat = 0;
6343
6344         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6345         nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC", 3);
6346         check_closed_broadcast!(nodes[1], true).unwrap();
6347         check_added_monitors!(nodes[1], 1);
6348         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() },
6349                 [nodes[0].node.get_our_node_id()], 100000);
6350 }
6351
6352 #[test]
6353 fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
6354         //BOLT 2 Requirement: MUST set cltv_expiry less than 500000000.
6355         //It is enforced when constructing a route.
6356         let chanmon_cfgs = create_chanmon_cfgs(2);
6357         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6358         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6359         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6360         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6361
6362         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0)
6363                 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
6364         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000);
6365         route.paths[0].hops.last_mut().unwrap().cltv_expiry_delta = 500000001;
6366         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6367                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6368                 ), true, APIError::InvalidRoute { ref err },
6369                 assert_eq!(err, &"Channel CLTV overflowed?"));
6370 }
6371
6372 #[test]
6373 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() {
6374         //BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC.
6375         //BOLT 2 Requirement: for the first HTLC it offers MUST set id to 0.
6376         //BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer.
6377         let chanmon_cfgs = create_chanmon_cfgs(2);
6378         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6379         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6380         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6381         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
6382         let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
6383                 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().counterparty_max_accepted_htlcs as u64;
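        // Read the counterparty's max_accepted_htlcs directly from the channel state rather
        // than hard-coding it, so the loop below offers exactly that many HTLCs.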
6384
6385         // Fetch a route in advance, as we will be unable to fetch one once we can no longer send.
6386         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6387         for i in 0..max_accepted_htlcs {
6388                 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
6389                 let payment_event = {
6390                         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6391                                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6392                         check_added_monitors!(nodes[0], 1);
6393
6394                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6395                         assert_eq!(events.len(), 1);
6396                         if let MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. }, } = events[0] {
6397                                 assert_eq!(htlcs[0].htlc_id, i);
6398                         } else {
6399                                 panic!("Unexpected event");
6400                         }
6401                         SendEvent::from_event(events.remove(0))
6402                 };
6403                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6404                 check_added_monitors!(nodes[1], 0);
6405                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6406
6407                 expect_pending_htlcs_forwardable!(nodes[1]);
6408                 expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000);
6409         }
6410         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6411                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6412                 ), true, APIError::ChannelUnavailable { .. }, {});
6413
6414         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6415 }
6416
6417 #[test]
6418 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
6419         //BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC.
6420         let chanmon_cfgs = create_chanmon_cfgs(2);
6421         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6422         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6423         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6424         let channel_value = 100000;
6425         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0);
6426         let max_in_flight = get_channel_value_stat!(nodes[0], nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat;
6427
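        // First confirm that a payment of exactly max_in_flight succeeds, so the failure below
        // is attributable to exceeding the limit rather than to the limit itself being unreachable.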
6428         send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight);
6429
6430         let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_in_flight);
6431         // Manually create a route over our max in flight (which our router normally
6432         // automatically limits us to).
6433         route.paths[0].hops[0].fee_msat = max_in_flight + 1;
6434         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6435                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
6436                 ), true, APIError::ChannelUnavailable { .. }, {});
6437         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6438
6439         send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
6440 }
6441
6442 // BOLT 2 Requirements for the Receiver when handling an update_add_htlc message.
6443 #[test]
6444 fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
6445         //BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel.
6446         let chanmon_cfgs = create_chanmon_cfgs(2);
6447         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6448         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6449         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6450         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6451         let htlc_minimum_msat: u64;
6452         {
6453                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
6454                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
6455                 let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
6456                 htlc_minimum_msat = channel.context().get_holder_htlc_minimum_msat();
6457         }
6458
6459         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
6460         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6461                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6462         check_added_monitors!(nodes[0], 1);
6463         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6464         updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat - 1;
6465         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6466         assert!(nodes[1].node.list_channels().is_empty());
6467         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6468         assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6469         check_added_monitors!(nodes[1], 1);
6470         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6471 }
6472
6473 #[test]
6474 fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
6475         //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel
6476         let chanmon_cfgs = create_chanmon_cfgs(2);
6477         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6478         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6479         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6480         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6481
6482         let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
6483         let channel_reserve = chan_stat.channel_reserve_msat;
6484         let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
6485         let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
6486         // The 2* and +1 are for the fee spike reserve.
6487         let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
6488
6489         let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound;
6490         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
6491         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6492                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6493         check_added_monitors!(nodes[0], 1);
6494         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6495
6496         // Even though channel-initiator senders are required to respect the fee_spike_reserve,
6497         // at this time channel-initiatee receivers are not required to enforce that senders
6498         // respect the fee_spike_reserve.
6499         updates.update_add_htlcs[0].amount_msat = max_can_send + commit_tx_fee_outbound + 1;
6500         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6501
6502         assert!(nodes[1].node.list_channels().is_empty());
6503         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6504         assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
6505         check_added_monitors!(nodes[1], 1);
6506         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6507 }
6508
6509 #[test]
6510 fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
6511         //BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel
6512         //BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash.
6513         let chanmon_cfgs = create_chanmon_cfgs(2);
6514         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6515         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6516         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6517         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6518
6519         let send_amt = 3999999;
6520         let (mut route, our_payment_hash, _, our_payment_secret) =
6521                 get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
6522         route.paths[0].hops[0].fee_msat = send_amt;
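        // Construct the onion by hand so the same update_add_htlc message can be replayed
        // below with incrementing htlc_ids, without going through send_payment each time.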
6523         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
6524         let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
6525         let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap();
6526         let recipient_onion_fields = RecipientOnionFields::secret_only(our_payment_secret);
6527         let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
6528                 &route.paths[0], send_amt, &recipient_onion_fields, cur_height, &None).unwrap();
6529         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
6530
6531         let mut msg = msgs::UpdateAddHTLC {
6532                 channel_id: chan.2,
6533                 htlc_id: 0,
6534                 amount_msat: 1000,
6535                 payment_hash: our_payment_hash,
6536                 cltv_expiry: htlc_cltv,
6537                 onion_routing_packet: onion_packet.clone(),
6538                 skimmed_fee_msat: None,
6539                 blinding_point: None,
6540         };
6541
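        // The loop below offers 50 HTLCs (the default max_accepted_htlcs in these tests); the
        // 51st update_add_htlc should trip the limit and force-close the channel.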
6542         for i in 0..50 {
6543                 msg.htlc_id = i as u64;
6544                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6545         }
6546         msg.htlc_id = 50;
6547         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6548
6549         assert!(nodes[1].node.list_channels().is_empty());
6550         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6551         assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6552         check_added_monitors!(nodes[1], 1);
6553         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6554 }
6555
6556 #[test]
6557 fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
6558         //BOLT 2 Requirement: if a sending node adds more than its max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction: SHOULD fail the channel
6559         let chanmon_cfgs = create_chanmon_cfgs(2);
6560         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6561         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6562         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6563         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6564
6565         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6566         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6567                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6568         check_added_monitors!(nodes[0], 1);
6569         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6570         updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat + 1;
6571         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6572
6573         assert!(nodes[1].node.list_channels().is_empty());
6574         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6575         assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
6576         check_added_monitors!(nodes[1], 1);
6577         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 1000000);
6578 }
6579
6580 #[test]
6581 fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
6582         //BOLT2 Requirement: if sending node sets cltv_expiry to greater or equal to 500000000: SHOULD fail the channel.
6583         let chanmon_cfgs = create_chanmon_cfgs(2);
6584         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6585         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6586         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6587
6588         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
6589         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6590         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6591                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6592         check_added_monitors!(nodes[0], 1);
6593         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
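        // Values of 500000000 or more are interpreted as UNIX timestamps rather than block
        // heights (mirroring Bitcoin's locktime semantics), hence the error message below.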
6594         updates.update_add_htlcs[0].cltv_expiry = 500000000;
6595         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6596
6597         assert!(nodes[1].node.list_channels().is_empty());
6598         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6599         assert_eq!(err_msg.data, "Remote provided CLTV expiry in seconds instead of block height");
6600         check_added_monitors!(nodes[1], 1);
6601         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6602 }
6603
6604 #[test]
6605 fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
6606         //BOLT 2 Requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection.
6607         // We test this by first checking that repeated HTLCs pass commitment signature checks
6608         // after disconnect, and then that non-sequential htlc_ids result in a channel failure.
6609         let chanmon_cfgs = create_chanmon_cfgs(2);
6610         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6611         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6612         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6613
6614         create_announced_chan_between_nodes(&nodes, 0, 1);
6615         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6616         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6617                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6618         check_added_monitors!(nodes[0], 1);
6619         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6620         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6621
6622         //Disconnect and Reconnect
6623         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
6624         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
6625         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
6626                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
6627         }, true).unwrap();
6628         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
6629         assert_eq!(reestablish_1.len(), 1);
6630         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
6631                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
6632         }, false).unwrap();
6633         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
6634         assert_eq!(reestablish_2.len(), 1);
6635         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
6636         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
6637         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
6638         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
6639
6640         //Resend HTLC
6641         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6642         assert_eq!(updates.commitment_signed.htlc_signatures.len(), 1);
6643         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed);
6644         check_added_monitors!(nodes[1], 1);
6645         let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6646
6647         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6648
6649         assert!(nodes[1].node.list_channels().is_empty());
6650         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6651         assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
6652         check_added_monitors!(nodes[1], 1);
6653         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
6654 }
6655
6656 #[test]
6657 fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
6658         //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6659
6660         let chanmon_cfgs = create_chanmon_cfgs(2);
6661         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6662         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6663         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6664         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6665         let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6666         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6667                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6668
6669         check_added_monitors!(nodes[0], 1);
6670         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6671         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6672
6673         let update_msg = msgs::UpdateFulfillHTLC{
6674                 channel_id: chan.2,
6675                 htlc_id: 0,
6676                 payment_preimage: our_payment_preimage,
6677         };
6678
6679         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6680
6681         assert!(nodes[0].node.list_channels().is_empty());
6682         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6683         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6684         check_added_monitors!(nodes[0], 1);
6685         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6686 }
6687
6688 #[test]
6689 fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
6690         //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6691
6692         let chanmon_cfgs = create_chanmon_cfgs(2);
6693         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6694         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6695         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6696         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6697
6698         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6699         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6700                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6701         check_added_monitors!(nodes[0], 1);
6702         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6703         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6704
6705         let update_msg = msgs::UpdateFailHTLC{
6706                 channel_id: chan.2,
6707                 htlc_id: 0,
6708                 reason: msgs::OnionErrorPacket { data: Vec::new()},
6709         };
6710
6711         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6712
6713         assert!(nodes[0].node.list_channels().is_empty());
6714         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6715         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6716         check_added_monitors!(nodes[0], 1);
6717         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6718 }
6719
6720 #[test]
6721 fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() {
6722         //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6723
6724         let chanmon_cfgs = create_chanmon_cfgs(2);
6725         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6726         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6727         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6728         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6729
6730         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6731         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6732                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6733         check_added_monitors!(nodes[0], 1);
6734         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6735         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6736         let update_msg = msgs::UpdateFailMalformedHTLC{
6737                 channel_id: chan.2,
6738                 htlc_id: 0,
6739                 sha256_of_onion: [1; 32],
6740                 failure_code: 0x8000,
6741         };
6742
6743         nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6744
6745         assert!(nodes[0].node.list_channels().is_empty());
6746         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6747         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6748         check_added_monitors!(nodes[0], 1);
6749         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6750 }
6751
6752 #[test]
6753 fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
6754         //BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel.
6755
6756         let chanmon_cfgs = create_chanmon_cfgs(2);
6757         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6758         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6759         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6760         create_announced_chan_between_nodes(&nodes, 0, 1);
6761
6762         let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
6763
6764         nodes[1].node.claim_funds(our_payment_preimage);
6765         check_added_monitors!(nodes[1], 1);
6766         expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);
6767
6768         let events = nodes[1].node.get_and_clear_pending_msg_events();
6769         assert_eq!(events.len(), 1);
6770         let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
6771                 match events[0] {
6772                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6773                                 assert!(update_add_htlcs.is_empty());
6774                                 assert_eq!(update_fulfill_htlcs.len(), 1);
6775                                 assert!(update_fail_htlcs.is_empty());
6776                                 assert!(update_fail_malformed_htlcs.is_empty());
6777                                 assert!(update_fee.is_none());
6778                                 update_fulfill_htlcs[0].clone()
6779                         },
6780                         _ => panic!("Unexpected event"),
6781                 }
6782         };
6783
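        // Only a single HTLC (id 0) was ever offered, so fulfilling id 1 references an HTLC
        // nodes[0] cannot find in its current commitment transaction.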
6784         update_fulfill_msg.htlc_id = 1;
6785
6786         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
6787
6788         assert!(nodes[0].node.list_channels().is_empty());
6789         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6790         assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
6791         check_added_monitors!(nodes[0], 1);
6792         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6793 }
6794
6795 #[test]
6796 fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
6797         //BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel.
6798
6799         let chanmon_cfgs = create_chanmon_cfgs(2);
6800         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6801         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6802         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6803         create_announced_chan_between_nodes(&nodes, 0, 1);
6804
6805         let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
6806
6807         nodes[1].node.claim_funds(our_payment_preimage);
6808         check_added_monitors!(nodes[1], 1);
6809         expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);
6810
6811         let events = nodes[1].node.get_and_clear_pending_msg_events();
6812         assert_eq!(events.len(), 1);
6813         let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
6814                 match events[0] {
6815                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6816                                 assert!(update_add_htlcs.is_empty());
6817                                 assert_eq!(update_fulfill_htlcs.len(), 1);
6818                                 assert!(update_fail_htlcs.is_empty());
6819                                 assert!(update_fail_malformed_htlcs.is_empty());
6820                                 assert!(update_fee.is_none());
6821                                 update_fulfill_htlcs[0].clone()
6822                         },
6823                         _ => panic!("Unexpected event"),
6824                 }
6825         };
6826
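        // Substitute a bogus preimage which does not SHA256-hash to our_payment_hash.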
6827         update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]);
6828
6829         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
6830
6831         assert!(nodes[0].node.list_channels().is_empty());
6832         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6833         assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
6834         check_added_monitors!(nodes[0], 1);
6835         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
6836 }
6837
6838 #[test]
6839 fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() {
6840         //BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel.
6841
6842         let chanmon_cfgs = create_chanmon_cfgs(2);
6843         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6844         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6845         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6846         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6847
6848         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
6849         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6850                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6851         check_added_monitors!(nodes[0], 1);
6852
6853         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6854         updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
6855
6856         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6857         check_added_monitors!(nodes[1], 0);
6858         commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true);
6859
6860         let events = nodes[1].node.get_and_clear_pending_msg_events();
6861
6862         let mut update_msg: msgs::UpdateFailMalformedHTLC = {
6863                 match events[0] {
6864                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6865                                 assert!(update_add_htlcs.is_empty());
6866                                 assert!(update_fulfill_htlcs.is_empty());
6867                                 assert!(update_fail_htlcs.is_empty());
6868                                 assert_eq!(update_fail_malformed_htlcs.len(), 1);
6869                                 assert!(update_fee.is_none());
6870                                 update_fail_malformed_htlcs[0].clone()
6871                         },
6872                         _ => panic!("Unexpected event"),
6873                 }
6874         };
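        // Clear the BADONION bit (0x8000), which BOLT 4 requires to be set in every
        // update_fail_malformed_htlc failure_code.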
6875         update_msg.failure_code &= !0x8000;
6876         nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6877
6878         assert!(nodes[0].node.list_channels().is_empty());
6879         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6880         assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
6881         check_added_monitors!(nodes[0], 1);
6882         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 1000000);
6883 }
6884
6885 #[test]
6886 fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_update_fail_htlc() {
6887         //BOLT 2 Requirement: a receiving node which has an outgoing HTLC canceled by update_fail_malformed_htlc:
6888         //    * MUST return an error in the update_fail_htlc sent to the link which originally sent the HTLC, using the failure_code given and setting the data to sha256_of_onion.
6889
6890         let chanmon_cfgs = create_chanmon_cfgs(3);
6891         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6892         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
6893         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6894         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
6895         let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000);
6896
6897         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
6898
6899         //First hop
6900         let mut payment_event = {
6901                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6902                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6903                 check_added_monitors!(nodes[0], 1);
6904                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6905                 assert_eq!(events.len(), 1);
6906                 SendEvent::from_event(events.remove(0))
6907         };
6908         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6909         check_added_monitors!(nodes[1], 0);
6910         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6911         expect_pending_htlcs_forwardable!(nodes[1]);
6912         let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
6913         assert_eq!(events_2.len(), 1);
6914         check_added_monitors!(nodes[1], 1);
6915         payment_event = SendEvent::from_event(events_2.remove(0));
6916         assert_eq!(payment_event.msgs.len(), 1);
6917
6918         //Second Hop
6919         payment_event.msgs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
6920         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
6921         check_added_monitors!(nodes[2], 0);
6922         commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
6923
6924         let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
6925         assert_eq!(events_3.len(), 1);
6926         let update_msg : (msgs::UpdateFailMalformedHTLC, msgs::CommitmentSigned) = {
6927                 match events_3[0] {
6928                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
6929                                 assert!(update_add_htlcs.is_empty());
6930                                 assert!(update_fulfill_htlcs.is_empty());
6931                                 assert!(update_fail_htlcs.is_empty());
6932                                 assert_eq!(update_fail_malformed_htlcs.len(), 1);
6933                                 assert!(update_fee.is_none());
6934                                 (update_fail_malformed_htlcs[0].clone(), commitment_signed.clone())
6935                         },
6936                         _ => panic!("Unexpected event"),
6937                 }
6938         };
6939
6940         nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg.0);
6941
6942         check_added_monitors!(nodes[1], 0);
6943         commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
6944         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
6945         let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
6946         assert_eq!(events_4.len(), 1);
6947
6948         //Confirm that handling the update_fail_malformed_htlc message produces an update_fail_htlc message to be forwarded back along the route
6949         match events_4[0] {
6950                 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
6951                         assert!(update_add_htlcs.is_empty());
6952                         assert!(update_fulfill_htlcs.is_empty());
6953                         assert_eq!(update_fail_htlcs.len(), 1);
6954                         assert!(update_fail_malformed_htlcs.is_empty());
6955                         assert!(update_fee.is_none());
6956                 },
6957                 _ => panic!("Unexpected event"),
6958         };
6959
6960         check_added_monitors!(nodes[1], 1);
6961 }
6962
6963 #[test]
6964 fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() {
6965         let chanmon_cfgs = create_chanmon_cfgs(3);
6966         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6967         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
6968         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6969         create_announced_chan_between_nodes(&nodes, 0, 1);
6970         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
6971
6972         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100_000);
6973
6974         // First hop
6975         let mut payment_event = {
6976                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
6977                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
6978                 check_added_monitors!(nodes[0], 1);
6979                 SendEvent::from_node(&nodes[0])
6980         };
6981
6982         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6983         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6984         expect_pending_htlcs_forwardable!(nodes[1]);
6985         check_added_monitors!(nodes[1], 1);
6986         payment_event = SendEvent::from_node(&nodes[1]);
6987         assert_eq!(payment_event.msgs.len(), 1);
6988
6989         // Second Hop
6990         payment_event.msgs[0].onion_routing_packet.version = 1; // Trigger an invalid_onion_version error
6991         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
6992         check_added_monitors!(nodes[2], 0);
6993         commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
6994
6995         let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
6996         assert_eq!(events_3.len(), 1);
6997         match events_3[0] {
6998                 MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
6999                         let mut update_msg = updates.update_fail_malformed_htlcs[0].clone();
7000                         // Set the NODE bit (BADONION and PERM already set in invalid_onion_version error)
7001                         update_msg.failure_code |= 0x2000;
7002
7003                         nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg);
7004                         commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true);
7005                 },
7006                 _ => panic!("Unexpected event"),
7007         }
7008
7009         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
7010                 vec![HTLCDestination::NextHopChannel {
7011                         node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
7012         let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
7013         assert_eq!(events_4.len(), 1);
7014         check_added_monitors!(nodes[1], 1);
7015
7016         match events_4[0] {
7017                 MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
7018                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
7019                         commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
7020                 },
7021                 _ => panic!("Unexpected event"),
7022         }
7023
7024         let events_5 = nodes[0].node.get_and_clear_pending_events();
7025         assert_eq!(events_5.len(), 2);
7026
7027         // Expect a PaymentPathFailed event with a ChannelFailure network update for the channel between
7028         // the node originating the error to its next hop.
7029         match events_5[0] {
7030                 Event::PaymentPathFailed { error_code, failure: PathFailure::OnPath { network_update: Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent }) }, ..
7031                 } => {
7032                         assert_eq!(short_channel_id, chan_2.0.contents.short_channel_id);
7033                         assert!(is_permanent);
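                        // BADONION (0x8000) | PERM (0x4000) | NODE (0x2000, set above) | 4
                        // (invalid_onion_version), per the BOLT 4 failure code scheme.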
7034                         assert_eq!(error_code, Some(0x8000|0x4000|0x2000|4));
7035                 },
7036                 _ => panic!("Unexpected event"),
7037         }
7038         match events_5[1] {
7039                 Event::PaymentFailed { payment_hash, .. } => {
7040                         assert_eq!(payment_hash, our_payment_hash);
7041                 },
7042                 _ => panic!("Unexpected event"),
7043         }
7044
7045         // TODO: Test actual removal of channel from NetworkGraph when it's implemented.
7046 }
7047
7048 fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
7049         // Dust-HTLC failure updates must be delayed until the failure-trigger tx (in this case the local commitment) reaches ANTI_REORG_DELAY.
7050         // We can have at most two valid local commitment txs, so both cases must be covered, and both txs must be checked to get them all, as
7051         // an HTLC could have been removed from the latest local commitment tx but still be valid until we get the remote RAA.
7052
7053         let mut chanmon_cfgs = create_chanmon_cfgs(2);
7054         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
7055         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7056         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7057         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7058         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
7059
7060         let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
7061                 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
7062
7063         // We route 2 dust-HTLCs between A and B
7064         let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7065         let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7066         route_payment(&nodes[0], &[&nodes[1]], 1000000);
7067
7068         // Cache one local commitment tx as previous
7069         let as_prev_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7070
7071         // Fail one HTLC to prune it in the will-be-latest-local commitment tx
7072         nodes[1].node.fail_htlc_backwards(&payment_hash_2);
7073         check_added_monitors!(nodes[1], 0);
7074         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
7075         check_added_monitors!(nodes[1], 1);
7076
7077         let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
7078         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &remove.update_fail_htlcs[0]);
7079         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &remove.commitment_signed);
7080         check_added_monitors!(nodes[0], 1);
7081
7082         // Cache one local commitment tx as latest
7083         let as_last_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7084
7085         let events = nodes[0].node.get_and_clear_pending_msg_events();
7086         match events[0] {
7087                 MessageSendEvent::SendRevokeAndACK { node_id, .. } => {
7088                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
7089                 },
7090                 _ => panic!("Unexpected event"),
7091         }
7092         match events[1] {
7093                 MessageSendEvent::UpdateHTLCs { node_id, .. } => {
7094                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
7095                 },
7096                 _ => panic!("Unexpected event"),
7097         }
7098
7099         assert_ne!(as_prev_commitment_tx, as_last_commitment_tx);
7100         // Fail the 2 dust-HTLCs, moving their failure into the maturation buffer (htlc_updated_waiting_threshold_conf)
7101         if announce_latest {
7102                 mine_transaction(&nodes[0], &as_last_commitment_tx[0]);
7103         } else {
7104                 mine_transaction(&nodes[0], &as_prev_commitment_tx[0]);
7105         }
7106
7107         check_closed_broadcast!(nodes[0], true);
7108         check_added_monitors!(nodes[0], 1);
7109         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
7110
7111         assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7112         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7113         let events = nodes[0].node.get_and_clear_pending_events();
7114         // Only 2 PaymentPathFailed events should show up (plus their PaymentFailed counterparts); the over-dust HTLC has to be failed by its timeout tx
7115         assert_eq!(events.len(), 4);
7116         let mut first_failed = false;
7117         for event in events {
7118                 match event {
7119                         Event::PaymentPathFailed { payment_hash, .. } => {
7120                                 if payment_hash == payment_hash_1 {
7121                                         assert!(!first_failed);
7122                                         first_failed = true;
7123                                 } else {
7124                                         assert_eq!(payment_hash, payment_hash_2);
7125                                 }
7126                         },
7127                         Event::PaymentFailed { .. } => {}
7128                         _ => panic!("Unexpected event"),
7129                 }
7130         }
7131 }
7132
7133 #[test]
7134 fn test_failure_delay_dust_htlc_local_commitment() {
7135         do_test_failure_delay_dust_htlc_local_commitment(true);
7136         do_test_failure_delay_dust_htlc_local_commitment(false);
7137 }
7138
7139 fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
7140         // Outbound HTLC-failure updates must be cancelled if we get a reorg before we reach ANTI_REORG_DELAY.
7141         // Broadcast of revoked remote commitment tx, trigger failure-update of dust/non-dust HTLCs
7142         // Broadcast of remote commitment tx, trigger failure-update of dust-HTLCs
7143         // Broadcast of timeout tx on remote commitment tx, trigger failure-update of non-dust HTLCs
7144         // Broadcast of local commitment tx, trigger failure-update of dust-HTLCs
7145         // Broadcast of HTLC-timeout tx on local commitment tx, trigger failure-update of non-dust HTLCs
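        // The (revoked, local) parameters select among those scenarios; a sketch of the mapping
        // as driven by test_sweep_outbound_htlc_failure_update below:
        //     (false, true)  => local commitment tx, then local HTLC-timeout tx
        //     (false, false) => remote commitment tx, then timeout tx on it
        //     (true,  false) => revoked remote commitment tx (justice claims)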
7146
7147         let chanmon_cfgs = create_chanmon_cfgs(3);
7148         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
7149         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
7150         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
7151         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
7152
7153         let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
7154                 .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
7155
7156         let (_payment_preimage_1, dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7157         let (_payment_preimage_2, non_dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
7158
7159         let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7160         let bs_commitment_tx = get_local_commitment_txn!(nodes[1], chan.2);
7161
7162         // We revoked bs_commitment_tx
7163         if revoked {
7164                 let (payment_preimage_3, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
7165                 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
7166         }
7167
7168         let mut timeout_tx = Vec::new();
7169         if local {
7170                 // We fail dust-HTLC 1 by broadcast of local commitment tx
7171                 mine_transaction(&nodes[0], &as_commitment_tx[0]);
7172                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
7173                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7174                 expect_payment_failed!(nodes[0], dust_hash, false);
7175
7176                 connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - ANTI_REORG_DELAY);
7177                 check_closed_broadcast!(nodes[0], true);
7178                 check_added_monitors!(nodes[0], 1);
7179                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7180                 timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
7181                 assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
7182                 // We fail non-dust-HTLC 2 by broadcast of local HTLC-timeout tx on local commitment tx
7183                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7184                 mine_transaction(&nodes[0], &timeout_tx[0]);
7185                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7186                 expect_payment_failed!(nodes[0], non_dust_hash, false);
7187         } else {
7188                 // We fail dust-HTLC 1 by broadcast of remote commitment tx. If revoked, the non-dust HTLC is failed as well
7189                 mine_transaction(&nodes[0], &bs_commitment_tx[0]);
7190                 check_closed_broadcast!(nodes[0], true);
7191                 check_added_monitors!(nodes[0], 1);
7192                 check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
7193                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7194
7195                 connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
7196                 timeout_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().drain(..)
7197                         .filter(|tx| tx.input[0].previous_output.txid == bs_commitment_tx[0].txid()).collect();
7198                 check_spends!(timeout_tx[0], bs_commitment_tx[0]);
7199                 // For both a revoked or non-revoked commitment transaction, after ANTI_REORG_DELAY the
7200                 // dust HTLC should have been failed.
7201                 expect_payment_failed!(nodes[0], dust_hash, false);
7202
7203                 if !revoked {
7204                         assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
7205                 } else {
7206                         assert_eq!(timeout_tx[0].lock_time.to_consensus_u32(), 11);
7207                 }
7208                 // We fail non-dust-HTLC 2 by broadcast of local timeout/revocation-claim tx
7209                 mine_transaction(&nodes[0], &timeout_tx[0]);
7210                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7211                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7212                 expect_payment_failed!(nodes[0], non_dust_hash, false);
7213         }
7214 }
7215
7216 #[test]
7217 fn test_sweep_outbound_htlc_failure_update() {
7218         do_test_sweep_outbound_htlc_failure_update(false, true);
7219         do_test_sweep_outbound_htlc_failure_update(false, false);
7220         do_test_sweep_outbound_htlc_failure_update(true, false);
7221 }
7222
7223 #[test]
7224 fn test_user_configurable_csv_delay() {
7225         // We test that our channel constructors yield errors when we pass them an absurd CSV delay
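        // Background (a sketch paraphrasing the config docs): our_to_self_delay is the CSV delay
        // we make the counterparty wait before claiming their balance, giving us time to punish a
        // revoked state, so configuring it below BREAKDOWN_TIMEOUT puts our funds at risk, while
        // channel_handshake_limits.their_to_self_delay caps how long a peer may make *us* wait:
        //     our_to_self_delay = 6        // below BREAKDOWN_TIMEOUT, rejected below
        //     their_to_self_delay = 100    // the most we will accept from a peer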
7226
7227         let mut low_our_to_self_config = UserConfig::default();
7228         low_our_to_self_config.channel_handshake_config.our_to_self_delay = 6;
7229         let mut high_their_to_self_config = UserConfig::default();
7230         high_their_to_self_config.channel_handshake_limits.their_to_self_delay = 100;
7231         let user_cfgs = [Some(high_their_to_self_config.clone()), None];
7232         let chanmon_cfgs = create_chanmon_cfgs(2);
7233         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7234         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
7235         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7236
7237         // We test that config.our_to_self_delay >= BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new()
7238         if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7239                 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0,
7240                 &low_our_to_self_config, 0, 42, None)
7241         {
7242                 match error {
7243                         APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
7244                         _ => panic!("Unexpected event"),
7245                 }
7246         } else { assert!(false) }
7247
7248         // We test that config.our_to_self_delay >= BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
7249         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
7250         let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
7251         open_channel.common_fields.to_self_delay = 200;
7252         if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7253                 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
7254                 &low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
7255         {
7256                 match error {
7257                         ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str()));  },
7258                         _ => panic!("Unexpected event"),
7259                 }
7260         } else { assert!(false); }
7261
7262         // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::accept_channel()
7263         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
7264         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
7265         let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
7266         accept_channel.common_fields.to_self_delay = 200;
7267         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
7268         let reason_msg;
7269         if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
7270                 match action {
7271                         &ErrorAction::SendErrorMessage { ref msg } => {
7272                                 assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str()));
7273                                 reason_msg = msg.data.clone();
7274                         },
7275                         _ => { panic!(); }
7276                 }
7277         } else { panic!(); }
7278         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }, [nodes[1].node.get_our_node_id()], 1000000);
7279
7280         // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
7281         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
7282         let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
7283         open_channel.common_fields.to_self_delay = 200;
7284         if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
7285                 &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
7286                 &high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
7287         {
7288                 match error {
7289                         ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); },
7290                         _ => panic!("Unexpected event"),
7291                 }
7292         } else { assert!(false); }
7293 }
7294
7295 #[test]
7296 fn test_check_htlc_underpaying() {
7297         // Send a payment through A -> B, but A maliciously
7298         // sends a probe payment (i.e. less than the expected value)
7299         // to B. B should refuse the payment.
7300
7301         let chanmon_cfgs = create_chanmon_cfgs(2);
7302         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7303         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7304         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7305
7306         // Create some initial channels
7307         create_announced_chan_between_nodes(&nodes, 0, 1);
7308
7309         let scorer = test_utils::TestScorer::new();
7310         let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7311         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
7312                 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
7313         let route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000);
7314         let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(),
7315                 None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
7316         let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]);
7317         let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None).unwrap();
7318         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
7319                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
7320         check_added_monitors!(nodes[0], 1);
7321
7322         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
7323         assert_eq!(events.len(), 1);
7324         let mut payment_event = SendEvent::from_event(events.pop().unwrap());
7325         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
7326         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
7327
7328         // Note that we first have to wait a random delay before processing the receipt of the HTLC,
7329         // and then will wait a second random delay before failing the HTLC back:
7330         expect_pending_htlcs_forwardable!(nodes[1]);
7331         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
7332
7333         // nodes[1] is expecting a payment of 100_000 but received 10_000;
7334         // it should fail the HTLC as if we didn't know the preimage.
7335         nodes[1].node.process_pending_htlc_forwards();
7336
7337         let events = nodes[1].node.get_and_clear_pending_msg_events();
7338         assert_eq!(events.len(), 1);
7339         let (update_fail_htlc, commitment_signed) = match events[0] {
7340                 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
7341                         assert!(update_add_htlcs.is_empty());
7342                         assert!(update_fulfill_htlcs.is_empty());
7343                         assert_eq!(update_fail_htlcs.len(), 1);
7344                         assert!(update_fail_malformed_htlcs.is_empty());
7345                         assert!(update_fee.is_none());
7346                         (update_fail_htlcs[0].clone(), commitment_signed)
7347                 },
7348                 _ => panic!("Unexpected event"),
7349         };
7350         check_added_monitors!(nodes[1], 1);
7351
7352         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlc);
7353         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
7354
7355         // 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32
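        // (Per BOLT 4, incorrect_or_unknown_payment_details = PERM|15 carries exactly this data:
        // the incoming htlc_msat as a big-endian u64 followed by the current block height as a
        // big-endian u32, which is what the two lines below construct.)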
7356         let mut expected_failure_data = (10_000 as u64).to_be_bytes().to_vec();
7357         expected_failure_data.extend_from_slice(&CHAN_CONFIRM_DEPTH.to_be_bytes());
7358         expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000|15, &expected_failure_data[..]);
7359 }
7360
7361 #[test]
7362 fn test_announce_disable_channels() {
7363         // Create 3 channels between A and B. Disconnect B. Call timer_tick_occurred and check for generated
7364         // ChannelUpdates. Reconnect B, reestablish, and check that the channels are re-enabled only after ENABLE_GOSSIP_TICKS.
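        // Mechanism under test (a sketch; the loops below are the real driver): after
        // DISABLE_GOSSIP_TICKS timer ticks with the peer disconnected we broadcast ChannelUpdates
        // with the disabled bit set, and after reconnecting we wait ENABLE_GOSSIP_TICKS ticks
        // before broadcasting updates with the bit cleared:
        //     for _ in 0..DISABLE_GOSSIP_TICKS + 1 { node.timer_tick_occurred(); }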
7365
7366         let chanmon_cfgs = create_chanmon_cfgs(2);
7367         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7368         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7369         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7370
7371         // Connect a dummy node so that broadcast message events are generated properly
7372         connect_dummy_node(&nodes[0]);
7373
7374         create_announced_chan_between_nodes(&nodes, 0, 1);
7375         create_announced_chan_between_nodes(&nodes, 1, 0);
7376         create_announced_chan_between_nodes(&nodes, 0, 1);
7377
7378         // Disconnect peers
7379         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
7380         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
7381
7382         for _ in 0..DISABLE_GOSSIP_TICKS + 1 {
7383                 nodes[0].node.timer_tick_occurred();
7384         }
7385         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
7386         assert_eq!(msg_events.len(), 3);
7387         let mut chans_disabled = new_hash_map();
7388         for e in msg_events {
7389                 match e {
7390                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
7391                                 assert_eq!(msg.contents.flags & (1<<1), 1<<1); // The "channel disabled" bit should be set
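                                // (Per BOLT 7, channel_update's channel_flags use bit 0 for the
                                // direction and bit 1 for "disable", hence the 1<<1 mask here.)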
7392                                 // Check that each channel gets updated exactly once
7393                                 if chans_disabled.insert(msg.contents.short_channel_id, msg.contents.timestamp).is_some() {
7394                                         panic!("Generated ChannelUpdate for wrong chan!");
7395                                 }
7396                         },
7397                         _ => panic!("Unexpected event"),
7398                 }
7399         }
7400         // Reconnect peers
7401         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
7402                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
7403         }, true).unwrap();
7404         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
7405         assert_eq!(reestablish_1.len(), 3);
7406         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
7407                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
7408         }, false).unwrap();
7409         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
7410         assert_eq!(reestablish_2.len(), 3);
7411
7412         // Reestablish chan_1
7413         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
7414         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7415         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
7416         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7417         // Reestablish chan_2
7418         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[1]);
7419         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7420         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[1]);
7421         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7422         // Reestablish chan_3
7423         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[2]);
7424         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7425         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[2]);
7426         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7427
7428         for _ in 0..ENABLE_GOSSIP_TICKS {
7429                 nodes[0].node.timer_tick_occurred();
7430         }
7431         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7432         nodes[0].node.timer_tick_occurred();
7433         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
7434         assert_eq!(msg_events.len(), 3);
7435         for e in msg_events {
7436                 match e {
7437                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
7438                                 assert_eq!(msg.contents.flags & (1<<1), 0); // The "channel disabled" bit should be off
7439                                 match chans_disabled.remove(&msg.contents.short_channel_id) {
7440                                         // Each update should have a higher timestamp than the previous one, replacing
7441                                         // the old one.
7442                                         Some(prev_timestamp) => assert!(msg.contents.timestamp > prev_timestamp),
7443                                         None => panic!("Generated ChannelUpdate for wrong chan!"),
7444                                 }
7445                         },
7446                         _ => panic!("Unexpected event"),
7447                 }
7448         }
7449         // Check that each channel gets updated exactly once
7450         assert!(chans_disabled.is_empty());
7451 }
7452
7453 #[test]
7454 fn test_bump_penalty_txn_on_revoked_commitment() {
7455         // In case penalty txn have feerates too low to get into mempools, RBF-bump them to be sure
7456         // we're able to claim outputs on the revoked commitment transaction before timelocks expire
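        // The bump heuristic under test (a sketch): each rebroadcast claim must pay at least a
        // 25% higher feerate than its predecessor, so the checks below take the form
        //     assert!(feerate_new * 100 >= feerate_old * 125);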
7457
7458         let chanmon_cfgs = create_chanmon_cfgs(2);
7459         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7460         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7461         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7462
7463         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7464
7465         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
7466         let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 30)
7467                 .with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap();
7468         let (route,_, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_params, 3000000);
7469         send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000);
7470
7471         let revoked_txn = get_local_commitment_txn!(nodes[0], chan.2);
7472         // Revoked commitment txn with 4 outputs: to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
7473         assert_eq!(revoked_txn[0].output.len(), 4);
7474         assert_eq!(revoked_txn[0].input.len(), 1);
7475         assert_eq!(revoked_txn[0].input[0].previous_output.txid, chan.3.txid());
7476         let revoked_txid = revoked_txn[0].txid();
7477
7478         let mut penalty_sum = 0;
7479         for outp in revoked_txn[0].output.iter() {
7480                 if outp.script_pubkey.is_v0_p2wsh() {
7481                         penalty_sum += outp.value;
7482                 }
7483         }
7484
7485         // Connect blocks to change the height_timer range to see if we use the right soonest_timelock
7486         let header_114 = connect_blocks(&nodes[1], 14);
7487
7488         // Actually revoke tx by claiming a HTLC
7489         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
7490         connect_block(&nodes[1], &create_dummy_block(header_114, 42, vec![revoked_txn[0].clone()]));
7491         check_added_monitors!(nodes[1], 1);
7492
7493         // One or more justice tx should have been broadcast, check it
7494         let penalty_1;
7495         let feerate_1;
7496         {
7497                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7498                 assert_eq!(node_txn.len(), 1); // justice tx (broadcasted from ChannelMonitor)
7499                 assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7500                 assert_eq!(node_txn[0].output.len(), 1);
7501                 check_spends!(node_txn[0], revoked_txn[0]);
7502                 let fee_1 = penalty_sum - node_txn[0].output[0].value;
7503                 feerate_1 = fee_1 * 1000 / node_txn[0].weight().to_wu();
7504                 penalty_1 = node_txn[0].txid();
7505                 node_txn.clear();
7506         };
7507
7508         // After exhaustion of height timer, a new bumped justice tx should have been broadcast, check it
7509         connect_blocks(&nodes[1], 15);
7510         let mut penalty_2 = penalty_1;
7511         let mut feerate_2 = 0;
7512         {
7513                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7514                 assert_eq!(node_txn.len(), 1);
7515                 if node_txn[0].input[0].previous_output.txid == revoked_txid {
7516                         assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7517                         assert_eq!(node_txn[0].output.len(), 1);
7518                         check_spends!(node_txn[0], revoked_txn[0]);
7519                         penalty_2 = node_txn[0].txid();
7520                         // Verify the new bumped tx is different from the last claiming transaction; we don't want spurious rebroadcasts
7521                         assert_ne!(penalty_2, penalty_1);
7522                         let fee_2 = penalty_sum - node_txn[0].output[0].value;
7523                         feerate_2 = fee_2 * 1000 / node_txn[0].weight().to_wu();
7524                         // Verify 25% bump heuristic
7525                         assert!(feerate_2 * 100 >= feerate_1 * 125);
7526                         node_txn.clear();
7527                 }
7528         }
7529         assert_ne!(feerate_2, 0);
7530
7531         // After exhaustion of height timer for a 2nd time, a new bumped justice tx should have been broadcast, check it
7532         connect_blocks(&nodes[1], 1);
7533         let penalty_3;
7534         let mut feerate_3 = 0;
7535         {
7536                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7537                 assert_eq!(node_txn.len(), 1);
7538                 if node_txn[0].input[0].previous_output.txid == revoked_txid {
7539                         assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
7540                         assert_eq!(node_txn[0].output.len(), 1);
7541                         check_spends!(node_txn[0], revoked_txn[0]);
7542                         penalty_3 = node_txn[0].txid();
7543                         // Verify the new bumped tx is different from the last claiming transaction; we don't want spurious rebroadcasts
7544                         assert_ne!(penalty_3, penalty_2);
7545                         let fee_3 = penalty_sum - node_txn[0].output[0].value;
7546                         feerate_3 = fee_3 * 1000 / node_txn[0].weight().to_wu();
7547                         // Verify 25% bump heuristic
7548                         assert!(feerate_3 * 100 >= feerate_2 * 125);
7549                         node_txn.clear();
7550                 }
7551         }
7552         assert_ne!(feerate_3, 0);
7553
7554         nodes[1].node.get_and_clear_pending_events();
7555         nodes[1].node.get_and_clear_pending_msg_events();
7556 }
7557
7558 #[test]
7559 fn test_bump_penalty_txn_on_revoked_htlcs() {
7560         // In case penalty txn have feerates too low to get into mempools, RBF-bump them to be sure
7561         // we're able to claim outputs on revoked HTLC transactions before timelocks expire
7562
7563         let mut chanmon_cfgs = create_chanmon_cfgs(2);
7564         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
7565         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7566         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7567         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7568
7569         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7570         // Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps)
7571         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 50).with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
7572         let scorer = test_utils::TestScorer::new();
7573         let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7574         let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
7575         let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(), None,
7576                 nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
7577         let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0;
7578         let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 50)
7579                 .with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap();
7580         let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
7581         let route = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None,
7582                 nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
7583         let failed_payment_hash = send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000).1;
7584
7585         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
7586         assert_eq!(revoked_local_txn[0].input.len(), 1);
7587         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
7588
7589         // Revoke local commitment tx
7590         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
7591
7592         // B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx
7593         connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()]));
7594         check_closed_broadcast!(nodes[1], true);
7595         check_added_monitors!(nodes[1], 1);
7596         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
7597         connect_blocks(&nodes[1], 50); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
7598
7599         let revoked_htlc_txn = {
7600                 let txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
7601                 assert_eq!(txn.len(), 2);
7602
7603                 assert_eq!(txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
7604                 assert_eq!(txn[0].input.len(), 1);
7605                 check_spends!(txn[0], revoked_local_txn[0]);
7606
7607                 assert_eq!(txn[1].input.len(), 1);
7608                 assert_eq!(txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
7609                 assert_eq!(txn[1].output.len(), 1);
7610                 check_spends!(txn[1], revoked_local_txn[0]);
7611
7612                 txn
7613         };
7614
7615         // Broadcast set of revoked txn on A
7616         let hash_128 = connect_blocks(&nodes[0], 40);
7617         let block_11 = create_dummy_block(hash_128, 42, vec![revoked_local_txn[0].clone()]);
7618         connect_block(&nodes[0], &block_11);
7619         let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]);
7620         connect_block(&nodes[0], &block_129);
7621         let events = nodes[0].node.get_and_clear_pending_events();
7622         expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]);
7623         match events.last().unwrap() {
7624                 Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
7625                 _ => panic!("Unexpected event"),
7626         }
7627         let first;
7628         let feerate_1;
7629         let penalty_txn;
7630         {
7631                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7632                 assert_eq!(node_txn.len(), 4); // 3 penalty txn on revoked commitment tx + 1 penalty txn on revoked HTLC txn
7633                 // Verify claim tx are spending revoked HTLC txn
7634
7635                 // node_txn 0-2 each spend a separate revoked output from revoked_local_txn[0]
7636                 // Note that node_txn[0] and node_txn[1] are bogus - they double spend the revoked_htlc_txn
7637                 // which are included in the same block (they are broadcasted because we scan the
7638                 // transactions linearly and generate claims as we go, they likely should be removed in the
7639                 // future).
7640                 assert_eq!(node_txn[0].input.len(), 1);
7641                 check_spends!(node_txn[0], revoked_local_txn[0]);
7642                 assert_eq!(node_txn[1].input.len(), 1);
7643                 check_spends!(node_txn[1], revoked_local_txn[0]);
7644                 assert_eq!(node_txn[2].input.len(), 1);
7645                 check_spends!(node_txn[2], revoked_local_txn[0]);
7646
7647                 // Each of the three justice transactions claim a separate (single) output of the three
7648                 // available, which we check here:
7649                 assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
7650                 assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
7651                 assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
7652
7653                 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output);
7654                 assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
7655
7656                 // node_txn[3] spends the revoked outputs from the revoked_htlc_txn (which only have one
7657                 // output, checked above).
7658                 assert_eq!(node_txn[3].input.len(), 2);
7659                 assert_eq!(node_txn[3].output.len(), 1);
7660                 check_spends!(node_txn[3], revoked_htlc_txn[0], revoked_htlc_txn[1]);
7661
7662                 first = node_txn[3].txid();
7663                 // Store both feerates for later comparison
7664                 let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[3].output[0].value;
7665                 feerate_1 = fee_1 * 1000 / node_txn[3].weight().to_wu();
7666                 penalty_txn = vec![node_txn[2].clone()];
7667                 node_txn.clear();
7668         }
7669
7670         // Connect one more block to see if bumped penalties are issued for HTLC txn
7671         let block_130 = create_dummy_block(block_129.block_hash(), 42, penalty_txn);
7672         connect_block(&nodes[0], &block_130);
7673         let block_131 = create_dummy_block(block_130.block_hash(), 42, Vec::new());
7674         connect_block(&nodes[0], &block_131);
7675
7676         // Few more blocks to confirm penalty txn
7677         connect_blocks(&nodes[0], 4);
7678         assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
7679         let header_144 = connect_blocks(&nodes[0], 9);
7680         let node_txn = {
7681                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7682                 assert_eq!(node_txn.len(), 1);
7683
7684                 assert_eq!(node_txn[0].input.len(), 2);
7685                 check_spends!(node_txn[0], revoked_htlc_txn[0], revoked_htlc_txn[1]);
7686                 // Verify bumped tx is different and 25% bump heuristic
7687                 assert_ne!(first, node_txn[0].txid());
7688                 let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[0].output[0].value;
7689                 let feerate_2 = fee_2 * 1000 / node_txn[0].weight().to_wu();
7690                 assert!(feerate_2 * 100 > feerate_1 * 125);
7691                 let txn = vec![node_txn[0].clone()];
7692                 node_txn.clear();
7693                 txn
7694         };
7695         // Broadcast claim txn and confirm blocks to avoid further bumps on these outputs
7696         connect_block(&nodes[0], &create_dummy_block(header_144, 42, node_txn));
7697         connect_blocks(&nodes[0], 20);
7698         {
7699                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7700                 // We verify that no new transaction has been broadcast because previously we were
7701                 // buggy on this exact behavior: by not tracking remote HTLC outputs for monitoring (see #411)
7702                 // we wouldn't see a spend of them by a justice tx, and bumped justice txn were generated
7703                 // forever instead of being safely cleaned up after confirmation plus ANTI_REORG_DELAY blocks.
7704                 // Enforce that spending the revoked HTLC outputs via a claiming transaction removes the claim
7705                 // request as expected, drying up bumped justice generation.
7706                 assert_eq!(node_txn.len(), 0);
7707                 node_txn.clear();
7708         }
7709         check_closed_broadcast!(nodes[0], true);
7710         check_added_monitors!(nodes[0], 1);
7711 }
7712
7713 #[test]
7714 fn test_bump_penalty_txn_on_remote_commitment() {
7715         // In case claim txn have feerates too low to get into mempools, RBF-bump them to be sure
7716         // we're able to claim outputs on the remote commitment transaction before timelocks expire
7717
7718         // Create 2 HTLCs
7719         // Provide preimage for one
7720         // Check aggregation
7721
7722         let chanmon_cfgs = create_chanmon_cfgs(2);
7723         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7724         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7725         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7726
7727         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7728         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
7729         route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
7730
7731         // Remote commitment txn with 4 outputs: to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
7732         let remote_txn = get_local_commitment_txn!(nodes[0], chan.2);
7733         assert_eq!(remote_txn[0].output.len(), 4);
7734         assert_eq!(remote_txn[0].input.len(), 1);
7735         assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());
7736
7737         // Claim a HTLC without revocation (provide B monitor with preimage)
7738         nodes[1].node.claim_funds(payment_preimage);
7739         expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
7740         mine_transaction(&nodes[1], &remote_txn[0]);
7741         check_added_monitors!(nodes[1], 2);
7742         connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
7743
7744         // One or more claim tx should have been broadcast, check it
7745         let timeout;
7746         let preimage;
7747         let preimage_bump;
7748         let feerate_timeout;
7749         let feerate_preimage;
7750         {
7751                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7752                 // 3 transactions including:
7753                 //   preimage and timeout sweeps from remote commitment + preimage sweep bump
7754                 assert_eq!(node_txn.len(), 3);
7755                 assert_eq!(node_txn[0].input.len(), 1);
7756                 assert_eq!(node_txn[1].input.len(), 1);
7757                 assert_eq!(node_txn[2].input.len(), 1);
7758                 check_spends!(node_txn[0], remote_txn[0]);
7759                 check_spends!(node_txn[1], remote_txn[0]);
7760                 check_spends!(node_txn[2], remote_txn[0]);
7761
7762                 preimage = node_txn[0].txid();
7763                 let index = node_txn[0].input[0].previous_output.vout;
7764                 let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
7765                 feerate_preimage = fee * 1000 / node_txn[0].weight().to_wu();
7766
7767                 let (preimage_bump_tx, timeout_tx) = if node_txn[2].input[0].previous_output == node_txn[0].input[0].previous_output {
7768                         (node_txn[2].clone(), node_txn[1].clone())
7769                 } else {
7770                         (node_txn[1].clone(), node_txn[2].clone())
7771                 };
7772
7773                 preimage_bump = preimage_bump_tx;
7774                 check_spends!(preimage_bump, remote_txn[0]);
7775                 assert_eq!(node_txn[0].input[0].previous_output, preimage_bump.input[0].previous_output);
7776
7777                 timeout = timeout_tx.txid();
7778                 let index = timeout_tx.input[0].previous_output.vout;
7779                 let fee = remote_txn[0].output[index as usize].value - timeout_tx.output[0].value;
7780                 feerate_timeout = fee * 1000 / timeout_tx.weight().to_wu();
7781
7782                 node_txn.clear();
7783         };
7784         assert_ne!(feerate_timeout, 0);
7785         assert_ne!(feerate_preimage, 0);
7786
7787         // After exhaustion of height timer, new bumped claim txn should have been broadcast, check it
7788         connect_blocks(&nodes[1], 1);
7789         {
7790                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
7791                 assert_eq!(node_txn.len(), 1);
7792                 assert_eq!(node_txn[0].input.len(), 1);
7793                 assert_eq!(preimage_bump.input.len(), 1);
7794                 check_spends!(node_txn[0], remote_txn[0]);
7795                 check_spends!(preimage_bump, remote_txn[0]);
7796
7797                 let index = preimage_bump.input[0].previous_output.vout;
7798                 let fee = remote_txn[0].output[index as usize].value - preimage_bump.output[0].value;
7799                 let new_feerate = fee * 1000 / preimage_bump.weight().to_wu();
7800                 assert!(new_feerate * 100 > feerate_timeout * 125);
7801                 assert_ne!(timeout, preimage_bump.txid());
7802
7803                 let index = node_txn[0].input[0].previous_output.vout;
7804                 let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
7805                 let new_feerate = fee * 1000 / node_txn[0].weight().to_wu();
7806                 assert!(new_feerate * 100 > feerate_preimage * 125);
7807                 assert_ne!(preimage, node_txn[0].txid());
7808
7809                 node_txn.clear();
7810         }
7811
7812         nodes[1].node.get_and_clear_pending_events();
7813         nodes[1].node.get_and_clear_pending_msg_events();
7814 }
7815
7816 #[test]
7817 fn test_counterparty_raa_skip_no_crash() {
7818         // Previously, if our counterparty sent two RAAs in a row without us having provided a
7819         // commitment transaction, we would have happily carried on and provided them the next
7820         // commitment transaction based on one RAA forward. This would probably eventually have led to
7821         // channel closure, but it would not have resulted in funds loss. Still, our
7822         // TestChannelSigner would have panicked as it doesn't like jumps into the future. Here, we
7823         // check simply that the channel is closed in response to such an RAA, but don't check whether
7824         // we decide to punish our counterparty for revoking their funds (as we don't currently
7825         // implement that).
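        // For orientation (per BOLT 3): commitment numbers count *down* from
        // INITIAL_COMMITMENT_NUMBER = 2^48 - 1, so releasing secrets for INITIAL_COMMITMENT_NUMBER,
        // INITIAL_COMMITMENT_NUMBER - 1, and - 2 below simulates the counterparty revoking states
        // ahead of anything we actually signed.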
7826         let chanmon_cfgs = create_chanmon_cfgs(2);
7827         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7828         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7829         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7830         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
7831
7832         let per_commitment_secret;
7833         let next_per_commitment_point;
7834         {
7835                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
7836                 let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
7837                 let keys = guard.channel_by_id.get_mut(&channel_id).map(
7838                         |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
7839                 ).flatten().unwrap().get_signer();
7840
7841                 const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
7842
7843                 // Make signer believe we got a counterparty signature, so that it allows the revocation
7844                 keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
7845                 per_commitment_secret = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
7846
7847                 // Must revoke without gaps
7848                 keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
7849                 keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
7850
7851                 keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
7852                 next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
7853                         &SecretKey::from_slice(&keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
7854         }
7855
7856         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
7857                 &msgs::RevokeAndACK {
7858                         channel_id,
7859                         per_commitment_secret,
7860                         next_per_commitment_point,
7861                         #[cfg(taproot)]
7862                         next_local_nonce: None,
7863                 });
7864         assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
7865         check_added_monitors!(nodes[1], 1);
7866         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() }
7867                 , [nodes[0].node.get_our_node_id()], 100000);
7868 }
7869
7870 #[test]
7871 fn test_bump_txn_sanitize_tracking_maps() {
7872         // Sanitizing pending_claim_requests and claimable_outpoints used to be buggy;
7873         // verify we clean them right after the expiration of ANTI_REORG_DELAY.
7874
7875         let chanmon_cfgs = create_chanmon_cfgs(2);
7876         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7877         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7878         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7879
7880         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
7881         // Lock HTLC in both directions
7882         let (payment_preimage_1, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
7883         let (_, payment_hash_2, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
7884
7885         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
7886         assert_eq!(revoked_local_txn[0].input.len(), 1);
7887         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
7888
7889         // Revoke local commitment tx
7890         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
7891
7892         // Broadcast set of revoked txn on A
7893         connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH);
7894         expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
7895         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
7896
7897         mine_transaction(&nodes[0], &revoked_local_txn[0]);
7898         check_closed_broadcast!(nodes[0], true);
7899         check_added_monitors!(nodes[0], 1);
7900         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000);
7901         let penalty_txn = {
7902                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7903                 assert_eq!(node_txn.len(), 3); //ChannelMonitor: justice txn * 3
7904                 check_spends!(node_txn[0], revoked_local_txn[0]);
7905                 check_spends!(node_txn[1], revoked_local_txn[0]);
7906                 check_spends!(node_txn[2], revoked_local_txn[0]);
7907                 let penalty_txn = vec![node_txn[0].clone(), node_txn[1].clone(), node_txn[2].clone()];
7908                 node_txn.clear();
7909                 penalty_txn
7910         };
7911         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, penalty_txn));
7912         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7913         {
7914                 let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(OutPoint { txid: chan.3.txid(), index: 0 }).unwrap();
7915                 assert!(monitor.inner.lock().unwrap().onchain_tx_handler.pending_claim_requests.is_empty());
7916                 assert!(monitor.inner.lock().unwrap().onchain_tx_handler.claimable_outpoints.is_empty());
7917         }
7918 }
7919
7920 #[test]
7921 fn test_channel_conf_timeout() {
7922         // Tests that, for inbound channels, we give up on them if the funding transaction does not
7923         // confirm within 2016 blocks, as recommended by BOLT 2.
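        // (2016 blocks is roughly two weeks at one block per ten minutes; BOLT 2 recommends the
        // fundee forget the channel if the funding transaction hasn't confirmed by then.)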
7924         let chanmon_cfgs = create_chanmon_cfgs(2);
7925         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7926         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7927         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7928
7929         let _funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 100_000);
7930
7931         // The outbound node should wait forever for confirmation:
7932         // This matches `channel::FUNDING_CONF_DEADLINE_BLOCKS` and BOLT 2's suggested timeout, so the
7933         // value is copied here instead of directly referencing the constant.
7934         connect_blocks(&nodes[0], 2016);
7935         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7936
7937         // The inbound node should fail the channel after exactly 2016 blocks
7938         connect_blocks(&nodes[1], 2015);
7939         check_added_monitors!(nodes[1], 0);
7940         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7941
7942         connect_blocks(&nodes[1], 1);
7943         check_added_monitors!(nodes[1], 1);
7944         check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
7945         let close_ev = nodes[1].node.get_and_clear_pending_msg_events();
7946         assert_eq!(close_ev.len(), 1);
7947         match close_ev[0] {
7948                 MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { ref msg }, ref node_id } => {
7949                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
7950                         assert_eq!(msg.as_ref().unwrap().data, "Channel closed because funding transaction failed to confirm within 2016 blocks");
7951                 },
7952                 _ => panic!("Unexpected event"),
7953         }
7954 }
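
// Hedged sketch of the give-up rule exercised above: purely illustrative, not an LDK API.
// `FUNDING_TIMEOUT_BLOCKS` is a local stand-in for `channel::FUNDING_CONF_DEADLINE_BLOCKS`,
// matching BOLT 2's suggested 2016-block limit for an unconfirmed inbound funding tx.
#[allow(dead_code)]
fn inbound_funding_timed_out_sketch(blocks_since_open: u32) -> bool {
        const FUNDING_TIMEOUT_BLOCKS: u32 = 2016;
        blocks_since_open >= FUNDING_TIMEOUT_BLOCKS
}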
7955
7956 #[test]
7957 fn test_override_channel_config() {
7958         let chanmon_cfgs = create_chanmon_cfgs(2);
7959         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7960         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7961         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7962
7963         // Node0 initiates a channel to node1 using the override config.
7964         let mut override_config = UserConfig::default();
7965         override_config.channel_handshake_config.our_to_self_delay = 200;
7966
7967         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(override_config)).unwrap();
7968
7969         // Assert the channel created by node0 is using the override config.
7970         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7971         assert_eq!(res.common_fields.channel_flags, 0);
7972         assert_eq!(res.common_fields.to_self_delay, 200);
7973 }
7974
7975 #[test]
7976 fn test_override_0msat_htlc_minimum() {
7977         let mut zero_config = UserConfig::default();
7978         zero_config.channel_handshake_config.our_htlc_minimum_msat = 0;
7979         let chanmon_cfgs = create_chanmon_cfgs(2);
7980         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7981         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(zero_config.clone())]);
7982         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7983
7984         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(zero_config)).unwrap();
7985         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7986         assert_eq!(res.common_fields.htlc_minimum_msat, 1);
7987
7988         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
7989         let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
7990         assert_eq!(res.common_fields.htlc_minimum_msat, 1);
7991 }
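
// Hedged sketch of the flooring behavior the test above relies on: a configured
// `our_htlc_minimum_msat` of 0 is advertised as 1 msat, since a 0-value HTLC is invalid.
// Illustrative only, not LDK's internal code path.
#[allow(dead_code)]
fn advertised_htlc_minimum_msat_sketch(configured_msat: u64) -> u64 {
        core::cmp::max(configured_msat, 1)
}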
7992
7993 #[test]
7994 fn test_channel_update_has_correct_htlc_maximum_msat() {
7995         // Tests that the `ChannelUpdate` message has the correct values for `htlc_maximum_msat` set.
7996         // BOLT 7 specifies that, if present, `htlc_maximum_msat`:
7997         // 1. MUST be set to less than or equal to the channel capacity. In LDK, this is capped at
7998         // 90% of the `channel_value`.
7999         // 2. MUST be set to less than or equal to the `max_htlc_value_in_flight_msat` received from the peer.
8000
8001         let mut config_30_percent = UserConfig::default();
8002         config_30_percent.channel_handshake_config.announced_channel = true;
8003         config_30_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 30;
8004         let mut config_50_percent = UserConfig::default();
8005         config_50_percent.channel_handshake_config.announced_channel = true;
8006         config_50_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50;
8007         let mut config_95_percent = UserConfig::default();
8008         config_95_percent.channel_handshake_config.announced_channel = true;
8009         config_95_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 95;
8010         let mut config_100_percent = UserConfig::default();
8011         config_100_percent.channel_handshake_config.announced_channel = true;
8012         config_100_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
8013
8014         let chanmon_cfgs = create_chanmon_cfgs(4);
8015         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
8016         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[Some(config_30_percent), Some(config_50_percent), Some(config_95_percent), Some(config_100_percent)]);
8017         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
8018
8019         let channel_value_satoshis = 100000;
8020         let channel_value_msat = channel_value_satoshis * 1000;
8021         let channel_value_30_percent_msat = (channel_value_msat as f64 * 0.3) as u64;
8022         let channel_value_50_percent_msat = (channel_value_msat as f64 * 0.5) as u64;
8023         let channel_value_90_percent_msat = (channel_value_msat as f64 * 0.9) as u64;
8024
8025         let (node_0_chan_update, node_1_chan_update, _, _)  = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value_satoshis, 10001);
8026         let (node_2_chan_update, node_3_chan_update, _, _)  = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, channel_value_satoshis, 10001);
8027
8028         // Assert that `node[0]`'s `ChannelUpdate` is capped at 50 percent of the `channel_value`, as
8029         // that's the value of `node[1]`'s `holder_max_htlc_value_in_flight_msat`.
8030         assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, channel_value_50_percent_msat);
8031         // Assert that `node[1]`'s `ChannelUpdate` is capped at 30 percent of the `channel_value`, as
8032         // that's the value of `node[0]`'s `holder_max_htlc_value_in_flight_msat`.
8033         assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, channel_value_30_percent_msat);
8034
8035         // Assert that `node[2]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
8036         // the value of `node[3]`'s `holder_max_htlc_value_in_flight_msat` (100%), exceeds 90% of the
8037         // `channel_value`.
8038         assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
8039         // Assert that `node[3]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
8040         // the value of `node[2]`'s `holder_max_htlc_value_in_flight_msat` (95%), exceeds 90% of the
8041         // `channel_value`.
8042         assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
8043 }
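
// Hedged sketch of the cap the assertions above check: the advertised `htlc_maximum_msat`
// is the peer's inbound in-flight limit, clamped to 90% of the channel value. Illustrative
// only, not the ChannelManager implementation.
#[allow(dead_code)]
fn expected_htlc_maximum_msat_sketch(channel_value_msat: u64, peer_max_in_flight_msat: u64) -> u64 {
        core::cmp::min(peer_max_in_flight_msat, channel_value_msat * 9 / 10)
}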
8044
8045 #[test]
8046 fn test_manually_accept_inbound_channel_request() {
8047         let mut manually_accept_conf = UserConfig::default();
8048         manually_accept_conf.manually_accept_inbound_channels = true;
8049         let chanmon_cfgs = create_chanmon_cfgs(2);
8050         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8051         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
8052         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8053
8054         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
8055         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8056
8057         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
8058
8059         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
8060         // accepting the inbound channel request.
8061         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
8062
8063         let events = nodes[1].node.get_and_clear_pending_events();
8064         match events[0] {
8065                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
8066                         nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23).unwrap();
8067                 }
8068                 _ => panic!("Unexpected event"),
8069         }
8070
8071         let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8072         assert_eq!(accept_msg_ev.len(), 1);
8073
8074         match accept_msg_ev[0] {
8075                 MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
8076                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8077                 }
8078                 _ => panic!("Unexpected event"),
8079         }
8080
8081         nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
8082
8083         let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8084         assert_eq!(close_msg_ev.len(), 1);
8085
8086         let events = nodes[1].node.get_and_clear_pending_events();
8087         match events[0] {
8088                 Event::ChannelClosed { user_channel_id, .. } => {
8089                         assert_eq!(user_channel_id, 23);
8090                 }
8091                 _ => panic!("Unexpected event"),
8092         }
8093 }
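
// A minimal sketch of a manual-acceptance policy built on the two calls the tests in this
// area exercise (`accept_inbound_channel` to accept, `force_close_broadcasting_latest_txn`
// to reject). `funding_satoshis` would come from `Event::OpenChannelRequest`; the
// `MIN_FUNDING_SAT` threshold is a hypothetical local policy, not an LDK constant.
#[allow(dead_code)]
fn should_accept_inbound_channel_sketch(funding_satoshis: u64) -> bool {
        const MIN_FUNDING_SAT: u64 = 50_000;
        funding_satoshis >= MIN_FUNDING_SAT
}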
8094
8095 #[test]
8096 fn test_manually_reject_inbound_channel_request() {
8097         let mut manually_accept_conf = UserConfig::default();
8098         manually_accept_conf.manually_accept_inbound_channels = true;
8099         let chanmon_cfgs = create_chanmon_cfgs(2);
8100         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8101         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
8102         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8103
8104         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
8105         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8106
8107         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
8108
8109         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
8110         // rejecting the inbound channel request.
8111         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
8112
8113         let events = nodes[1].node.get_and_clear_pending_events();
8114         match events[0] {
8115                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
8116                         nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
8117                 }
8118                 _ => panic!("Unexpected event"),
8119         }
8120
8121         let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8122         assert_eq!(close_msg_ev.len(), 1);
8123
8124         match close_msg_ev[0] {
8125                 MessageSendEvent::HandleError { ref node_id, .. } => {
8126                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8127                 }
8128                 _ => panic!("Unexpected event"),
8129         }
8130
8131         // There should be no more events to process, as the channel was never opened.
8132         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
8133 }
8134
8135 #[test]
8136 fn test_can_not_accept_inbound_channel_twice() {
8137         let mut manually_accept_conf = UserConfig::default();
8138         manually_accept_conf.manually_accept_inbound_channels = true;
8139         let chanmon_cfgs = create_chanmon_cfgs(2);
8140         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8141         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
8142         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8143
8144         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
8145         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8146
8147         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
8148
8149         // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
8150         // accepting the inbound channel request.
8151         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
8152
8153         let events = nodes[1].node.get_and_clear_pending_events();
8154         match events[0] {
8155                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
8156                         nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
8157                         let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0);
8158                         match api_res {
8159                                 Err(APIError::APIMisuseError { err }) => {
8160                                         assert_eq!(err, "No such channel awaiting to be accepted.");
8161                                 },
8162                                 Ok(_) => panic!("Channel shouldn't be possible to be accepted twice"),
8163                                 Err(e) => panic!("Unexpected Error {:?}", e),
8164                         }
8165                 }
8166                 _ => panic!("Unexpected event"),
8167         }
8168
8169         // Ensure that the channel wasn't closed after attempting to accept it twice.
8170         let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
8171         assert_eq!(accept_msg_ev.len(), 1);
8172
8173         match accept_msg_ev[0] {
8174                 MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
8175                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8176                 }
8177                 _ => panic!("Unexpected event"),
8178         }
8179 }
8180
8181 #[test]
8182 fn test_can_not_accept_unknown_inbound_channel() {
8183         let chanmon_cfg = create_chanmon_cfgs(2);
8184         let node_cfg = create_node_cfgs(2, &chanmon_cfg);
8185         let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
8186         let nodes = create_network(2, &node_cfg, &node_chanmgr);
8187
8188         let unknown_channel_id = ChannelId::new_zero();
8189         let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0);
8190         match api_res {
8191                 Err(APIError::APIMisuseError { err }) => {
8192                         assert_eq!(err, "No such channel awaiting to be accepted.");
8193                 },
8194                 Ok(_) => panic!("It shouldn't be possible to accept an unknown channel"),
8195                 Err(e) => panic!("Unexpected Error: {:?}", e),
8196         }
8197 }
8198
8199 #[test]
8200 fn test_onion_value_mpp_set_calculation() {
8201         // Test that we use the onion value `amt_to_forward` when
8202         // calculating whether we've reached the `total_msat` of an MPP
8203         // by having a routing node forward more than `amt_to_forward`
8204         // and checking that the receiving node doesn't generate
8205         // a PaymentClaimable event too early
8206         let node_count = 4;
8207         let chanmon_cfgs = create_chanmon_cfgs(node_count);
8208         let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
8209         let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
8210         let mut nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
8211
8212         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8213         let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
8214         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
8215         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
8216
8217         let total_msat = 100_000;
8218         let expected_paths: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]];
8219         let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], total_msat);
8220         let sample_path = route.paths.pop().unwrap();
8221
8222         let mut path_1 = sample_path.clone();
8223         path_1.hops[0].pubkey = nodes[1].node.get_our_node_id();
8224         path_1.hops[0].short_channel_id = chan_1_id;
8225         path_1.hops[1].pubkey = nodes[3].node.get_our_node_id();
8226         path_1.hops[1].short_channel_id = chan_3_id;
8227         path_1.hops[1].fee_msat = 100_000;
8228         route.paths.push(path_1);
8229
8230         let mut path_2 = sample_path.clone();
8231         path_2.hops[0].pubkey = nodes[2].node.get_our_node_id();
8232         path_2.hops[0].short_channel_id = chan_2_id;
8233         path_2.hops[1].pubkey = nodes[3].node.get_our_node_id();
8234         path_2.hops[1].short_channel_id = chan_4_id;
8235         path_2.hops[1].fee_msat = 1_000;
8236         route.paths.push(path_2);
8237
8238         // Send payment
8239         let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
8240         let onion_session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
8241                 RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap();
8242         nodes[0].node.test_send_payment_internal(&route, our_payment_hash,
8243                 RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
8244         check_added_monitors!(nodes[0], expected_paths.len());
8245
8246         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8247         assert_eq!(events.len(), expected_paths.len());
8248
8249         // First path
8250         let ev = remove_first_msg_event_to_node(&expected_paths[0][0].node.get_our_node_id(), &mut events);
8251         let mut payment_event = SendEvent::from_event(ev);
8252         let mut prev_node = &nodes[0];
8253
8254         for (idx, &node) in expected_paths[0].iter().enumerate() {
8255                 assert_eq!(node.node.get_our_node_id(), payment_event.node_id);
8256
8257                 if idx == 0 { // routing node
8258                         let session_priv = [3; 32];
8259                         let height = nodes[0].best_block_info().1;
8260                         let session_priv = SecretKey::from_slice(&session_priv).unwrap();
8261                         let mut onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
8262                         let recipient_onion_fields = RecipientOnionFields::secret_only(our_payment_secret);
8263                         let (mut onion_payloads, _, _) = onion_utils::build_onion_payloads(&route.paths[0], 100_000,
8264                                 &recipient_onion_fields, height + 1, &None).unwrap();
8265                         // Edit amt_to_forward to simulate the sender having set
8266                         // the final amount and the routing node taking less fee
8267                         if let msgs::OutboundOnionPayload::Receive {
8268                                 ref mut sender_intended_htlc_amt_msat, ..
8269                         } = onion_payloads[1] {
8270                                 *sender_intended_htlc_amt_msat = 99_000;
8271                         } else { panic!() }
8272                         let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
8273                         payment_event.msgs[0].onion_routing_packet = new_onion_packet;
8274                 }
8275
8276                 node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]);
8277                 check_added_monitors!(node, 0);
8278                 commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
8279                 expect_pending_htlcs_forwardable!(node);
8280
8281                 if idx == 0 {
8282                         let mut events_2 = node.node.get_and_clear_pending_msg_events();
8283                         assert_eq!(events_2.len(), 1);
8284                         check_added_monitors!(node, 1);
8285                         payment_event = SendEvent::from_event(events_2.remove(0));
8286                         assert_eq!(payment_event.msgs.len(), 1);
8287                 } else {
8288                         let events_2 = node.node.get_and_clear_pending_events();
8289                         assert!(events_2.is_empty());
8290                 }
8291
8292                 prev_node = node;
8293         }
8294
8295         // Second path
8296         let ev = remove_first_msg_event_to_node(&expected_paths[1][0].node.get_our_node_id(), &mut events);
8297         pass_along_path(&nodes[0], expected_paths[1], 101_000, our_payment_hash.clone(), Some(our_payment_secret), ev, true, None);
8298
8299         claim_payment_along_route(&nodes[0], expected_paths, false, our_payment_preimage);
8300 }
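
// Hedged sketch of the property the test above pins down: when deciding whether an MPP set
// is complete, the recipient sums the sender-intended onion amounts, not the (possibly
// larger) HTLC values actually forwarded. Illustrative only, not LDK's internals. In the
// test, the first HTLC carries 100_000 msat but its onion says 99_000, so the set is only
// complete once the second path's 1_000 msat arrives.
#[allow(dead_code)]
fn mpp_set_complete_sketch(sender_intended_amts_msat: &[u64], total_msat: u64) -> bool {
        sender_intended_amts_msat.iter().sum::<u64>() >= total_msat
}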
8301
8302 fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) {
8303
8304         let routing_node_count = msat_amounts.len();
8305         let node_count = routing_node_count + 2;
8306
8307         let chanmon_cfgs = create_chanmon_cfgs(node_count);
8308         let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
8309         let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
8310         let nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
8311
8312         let src_idx = 0;
8313         let dst_idx = 1;
8314
8315         // Create channels for each amount
8316         let mut expected_paths = Vec::with_capacity(routing_node_count);
8317         let mut src_chan_ids = Vec::with_capacity(routing_node_count);
8318         let mut dst_chan_ids = Vec::with_capacity(routing_node_count);
8319         for i in 0..routing_node_count {
8320                 let routing_node = 2 + i;
8321                 let src_chan_id = create_announced_chan_between_nodes(&nodes, src_idx, routing_node).0.contents.short_channel_id;
8322                 src_chan_ids.push(src_chan_id);
8323                 let dst_chan_id = create_announced_chan_between_nodes(&nodes, routing_node, dst_idx).0.contents.short_channel_id;
8324                 dst_chan_ids.push(dst_chan_id);
8325                 let path = vec![&nodes[routing_node], &nodes[dst_idx]];
8326                 expected_paths.push(path);
8327         }
8328         let expected_paths: Vec<&[&Node]> = expected_paths.iter().map(|route| route.as_slice()).collect();
8329
8330         // Create a route for each amount
8331         let example_amount = 100000;
8332         let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[src_idx], nodes[dst_idx], example_amount);
8333         let sample_path = route.paths.pop().unwrap();
8334         for i in 0..routing_node_count {
8335                 let routing_node = 2 + i;
8336                 let mut path = sample_path.clone();
8337                 path.hops[0].pubkey = nodes[routing_node].node.get_our_node_id();
8338                 path.hops[0].short_channel_id = src_chan_ids[i];
8339                 path.hops[1].pubkey = nodes[dst_idx].node.get_our_node_id();
8340                 path.hops[1].short_channel_id = dst_chan_ids[i];
8341                 path.hops[1].fee_msat = msat_amounts[i];
8342                 route.paths.push(path);
8343         }
8344
8345         // Send payment with manually set total_msat
8346         let payment_id = PaymentId(nodes[src_idx].keys_manager.backing.get_secure_random_bytes());
8347         let onion_session_privs = nodes[src_idx].node.test_add_new_pending_payment(our_payment_hash,
8348                 RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap();
8349         nodes[src_idx].node.test_send_payment_internal(&route, our_payment_hash,
8350                 RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
8351         check_added_monitors!(nodes[src_idx], expected_paths.len());
8352
8353         let mut events = nodes[src_idx].node.get_and_clear_pending_msg_events();
8354         assert_eq!(events.len(), expected_paths.len());
8355         let mut amount_received = 0;
8356         for (path_idx, expected_path) in expected_paths.iter().enumerate() {
8357                 let ev = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &mut events);
8358
8359                 let current_path_amount = msat_amounts[path_idx];
8360                 amount_received += current_path_amount;
8361                 let became_claimable_now = amount_received >= total_msat && amount_received - current_path_amount < total_msat;
8362                 pass_along_path(&nodes[src_idx], expected_path, amount_received, our_payment_hash.clone(), Some(our_payment_secret), ev, became_claimable_now, None);
8363         }
8364
8365         claim_payment_along_route(&nodes[src_idx], &expected_paths, false, our_payment_preimage);
8366 }
8367
8368 #[test]
8369 fn test_overshoot_mpp() {
8370         do_test_overshoot_mpp(&[100_000, 101_000], 200_000);
8371         do_test_overshoot_mpp(&[100_000, 10_000, 100_000], 200_000);
8372 }
8373
8374 #[test]
8375 fn test_simple_mpp() {
8376         // Simple test of sending a multi-path payment.
8377         let chanmon_cfgs = create_chanmon_cfgs(4);
8378         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
8379         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
8380         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
8381
8382         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8383         let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
8384         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
8385         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
8386
8387         let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
8388         let path = route.paths[0].clone();
8389         route.paths.push(path);
8390         route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
8391         route.paths[0].hops[0].short_channel_id = chan_1_id;
8392         route.paths[0].hops[1].short_channel_id = chan_3_id;
8393         route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
8394         route.paths[1].hops[0].short_channel_id = chan_2_id;
8395         route.paths[1].hops[1].short_channel_id = chan_4_id;
8396         send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
8397         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
8398 }
8399
8400 #[test]
8401 fn test_preimage_storage() {
8402         // Simple test of payment preimage storage allowing no client-side storage to claim payments
8403         let chanmon_cfgs = create_chanmon_cfgs(2);
8404         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8405         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8406         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8407
8408         create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8409
8410         {
8411                 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200, None).unwrap();
8412                 let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
8413                 nodes[0].node.send_payment_with_route(&route, payment_hash,
8414                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
8415                 check_added_monitors!(nodes[0], 1);
8416                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8417                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
8418                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8419                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8420         }
8421         // Note that after leaving the above scope we have no knowledge of any arguments or return
8422         // values from previous calls.
8423         expect_pending_htlcs_forwardable!(nodes[1]);
8424         let events = nodes[1].node.get_and_clear_pending_events();
8425         assert_eq!(events.len(), 1);
8426         match events[0] {
8427                 Event::PaymentClaimable { ref purpose, .. } => {
8428                         match &purpose {
8429                                 PaymentPurpose::Bolt11InvoicePayment { payment_preimage, .. } => {
8430                                         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap());
8431                                 },
8432                                 _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
8433                         }
8434                 },
8435                 _ => panic!("Unexpected event"),
8436         }
8437 }
8438
8439 #[test]
8440 fn test_bad_secret_hash() {
8441         // Simple test of unregistered payment hash/invalid payment secret handling
8442         let chanmon_cfgs = create_chanmon_cfgs(2);
8443         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8444         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8445         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8446
8447         create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8448
8449         let random_payment_hash = PaymentHash([42; 32]);
8450         let random_payment_secret = PaymentSecret([43; 32]);
8451         let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2, None).unwrap();
8452         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
8453
8454         // All the below cases should end up being handled exactly identically, so we macro the
8455         // resulting events.
8456         macro_rules! handle_unknown_invalid_payment_data {
8457                 ($payment_hash: expr) => {
8458                         check_added_monitors!(nodes[0], 1);
8459                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8460                         let payment_event = SendEvent::from_event(events.pop().unwrap());
8461                         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8462                         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8463
8464                         // We have to forward pending HTLCs once to process the receipt of the HTLC and then
8465                         // again to process the pending backwards-failure of the HTLC
8466                         expect_pending_htlcs_forwardable!(nodes[1]);
8467                         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment{ payment_hash: $payment_hash }]);
8468                         check_added_monitors!(nodes[1], 1);
8469
8470                         // We should fail the payment back
8471                         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
8472                         match events.pop().unwrap() {
8473                                 MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } } => {
8474                                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
8475                                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
8476                                 },
8477                                 _ => panic!("Unexpected event"),
8478                         }
8479                 }
8480         }
8481
8482         let expected_error_code = 0x4000|15; // incorrect_or_unknown_payment_details
8483         // Error data is the HTLC value (100,000) and current block height
8484         let expected_error_data = [0, 0, 0, 0, 0, 1, 0x86, 0xa0, 0, 0, 0, CHAN_CONFIRM_DEPTH as u8];
8485
8486         // Send a payment with the right payment hash but the wrong payment secret
8487         nodes[0].node.send_payment_with_route(&route, our_payment_hash,
8488                 RecipientOnionFields::secret_only(random_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
8489         handle_unknown_invalid_payment_data!(our_payment_hash);
8490         expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data);
8491
8492         // Send a payment with a random payment hash, but the right payment secret
8493         nodes[0].node.send_payment_with_route(&route, random_payment_hash,
8494                 RecipientOnionFields::secret_only(our_payment_secret), PaymentId(random_payment_hash.0)).unwrap();
8495         handle_unknown_invalid_payment_data!(random_payment_hash);
8496         expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8497
8498         // Send a payment with a random payment hash and random payment secret
8499         nodes[0].node.send_payment_with_route(&route, random_payment_hash,
8500                 RecipientOnionFields::secret_only(random_payment_secret), PaymentId(random_payment_hash.0)).unwrap();
8501         handle_unknown_invalid_payment_data!(random_payment_hash);
8502         expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8503 }
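
// Hedged sketch of the byte layout of `expected_error_data` above, per BOLT 4's
// `incorrect_or_unknown_payment_details` failure: an 8-byte big-endian `htlc_msat`
// followed by a 4-byte big-endian block height. Illustrative helper, not an LDK API.
#[allow(dead_code)]
fn incorrect_payment_details_error_data_sketch(htlc_msat: u64, height: u32) -> Vec<u8> {
        let mut data = htlc_msat.to_be_bytes().to_vec();
        data.extend_from_slice(&height.to_be_bytes());
        data
}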
8504
8505 #[test]
8506 fn test_update_err_monitor_lockdown() {
8507         // Our monitor will lock updates of our local commitment transaction if a broadcast
8508         // condition has been fulfilled (either a force-close from Channel or a block height
8509         // requiring an HTLC-timeout). Trying to update the monitor after lockdown should return a
8510         // ChannelMonitorUpdateStatus error.
8511         //
8512         // This scenario may happen in a watchtower setup, where the watchtower processes a block
8513         // height triggering a timeout while a slow-block-processing ChannelManager receives a
8514         // locally-signed commitment at the same time.
8515
8516         let chanmon_cfgs = create_chanmon_cfgs(2);
8517         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8518         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8519         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8520
8521         // Create some initial channel
8522         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
8523         let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
8524
8525         // Rebalance the network to generate an HTLC in each of the two directions
8526         send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
8527
8528         // Route a HTLC from node 0 to node 1 (but don't settle)
8529         let (preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
8530
8531         // Copy ChainMonitor to simulate a watchtower and update the block height of node 0 until its ChannelMonitor times out the HTLC on-chain
8532         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8533         let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
8534         let persister = test_utils::TestPersister::new();
8535         let watchtower = {
8536                 let new_monitor = {
8537                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8538                         let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
8539                                         &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8540                         assert!(new_monitor == *monitor);
8541                         new_monitor
8542                 };
8543                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8544                 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
8545                 watchtower
8546         };
8547         let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
8548         // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
8549         // transaction lock time requirements here.
8550         chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (block.clone(), 200));
8551         watchtower.chain_monitor.block_connected(&block, 200);
8552
8553         // Try to update ChannelMonitor
8554         nodes[1].node.claim_funds(preimage);
8555         check_added_monitors!(nodes[1], 1);
8556         expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
8557
8558         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8559         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
8560         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
8561         {
8562                 let mut node_0_per_peer_lock;
8563                 let mut node_0_peer_state_lock;
8564                 if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
8565                         if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
8566                                 assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);
8567                                 assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8568                         } else { assert!(false); }
8569                 } else {
8570                         assert!(false);
8571                 }
8572         }
8573         // Our local monitor is in-sync and hasn't yet processed the timeout
8574         check_added_monitors!(nodes[0], 1);
8575         let events = nodes[0].node.get_and_clear_pending_events();
8576         assert_eq!(events.len(), 1);
8577 }
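
// Hedged sketch of the lockdown rule the test above exercises: once a monitor has
// broadcast its holder commitment (e.g. because an HTLC deadline was hit), further channel
// updates are refused. A toy decision function only, mirroring the statuses asserted in the
// test; not the ChannelMonitor implementation.
#[allow(dead_code)]
fn monitor_accepts_update_sketch(holder_tx_broadcast: bool) -> ChannelMonitorUpdateStatus {
        if holder_tx_broadcast { ChannelMonitorUpdateStatus::InProgress }
        else { ChannelMonitorUpdateStatus::Completed }
}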
8578
8579 #[test]
8580 fn test_concurrent_monitor_claim() {
8581         // Watchtower Alice receives a block and broadcasts state N. The channel then advances to
8582         // state N+1, which is sent to both watchtowers. Bob accepts N+1, then receives a block and
8583         // broadcasts the latest state N+1. Alice rejects state N+1, but Bob has already broadcast
8584         // it, and state N+1 confirms. Alice then claims an output from state N+1.
8585
8586         let chanmon_cfgs = create_chanmon_cfgs(2);
8587         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8588         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8589         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8590
8591         // Create some initial channel
8592         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
8593         let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
8594
8595         // Rebalance the network to generate an HTLC in each of the two directions
8596         send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
8597
8598         // Route a HTLC from node 0 to node 1 (but don't settle)
8599         route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
8600
8601         // Copy ChainMonitor to simulate watchtower Alice and update the block height so her ChannelMonitor times out the HTLC on-chain
8602         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8603         let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
8604         let persister = test_utils::TestPersister::new();
8605         let alice_broadcaster = test_utils::TestBroadcaster::with_blocks(
8606                 Arc::new(Mutex::new(nodes[0].blocks.lock().unwrap().clone())),
8607         );
8608         let watchtower_alice = {
8609                 let new_monitor = {
8610                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8611                         let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
8612                                         &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8613                         assert!(new_monitor == *monitor);
8614                         new_monitor
8615                 };
8616                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &alice_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8617                 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
8618                 watchtower
8619         };
8620         let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
8621         // Make Alice aware of enough blocks that it doesn't think we're violating transaction lock time
8622         // requirements here.
8623         const HTLC_TIMEOUT_BROADCAST: u32 = CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS;
8624         alice_broadcaster.blocks.lock().unwrap().resize((HTLC_TIMEOUT_BROADCAST) as usize, (block.clone(), HTLC_TIMEOUT_BROADCAST));
8625         watchtower_alice.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST);
8626
8627         // Watchtower Alice should have broadcast a commitment/HTLC-timeout
8628         {
8629                 let mut txn = alice_broadcaster.txn_broadcast();
8630                 assert_eq!(txn.len(), 2);
8631                 check_spends!(txn[0], chan_1.3);
8632                 check_spends!(txn[1], txn[0]);
8633         };
8634
8635         // Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first.
8636         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8637         let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
8638         let persister = test_utils::TestPersister::new();
8639         let bob_broadcaster = test_utils::TestBroadcaster::with_blocks(Arc::clone(&alice_broadcaster.blocks));
8640         let watchtower_bob = {
8641                 let new_monitor = {
8642                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
8643                         let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
8644                                         &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
8645                         assert!(new_monitor == *monitor);
8646                         new_monitor
8647                 };
8648                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &bob_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8649                 assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
8650                 watchtower
8651         };
8652         watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST - 1);
8653
8654         // Route another payment to generate another update while the previous HTLC is still pending
8655         let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 3000000);
8656         nodes[1].node.send_payment_with_route(&route, payment_hash,
8657                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
8658         check_added_monitors!(nodes[1], 1);
8659
8660         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8661         assert_eq!(updates.update_add_htlcs.len(), 1);
8662         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]);
8663         {
8664                 let mut node_0_per_peer_lock;
8665                 let mut node_0_peer_state_lock;
8666                 if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
8667                         if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
8668                                 // Watchtower Alice should already have seen the block and reject the update
8669                                 assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);
8670                                 assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8671                                 assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
8672                         } else { assert!(false); }
8673                 } else {
8674                         assert!(false);
8675                 }
8676         }
8677         // Our local monitor is in-sync and hasn't yet processed the timeout
8678         check_added_monitors!(nodes[0], 1);
8679
8680         // Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout
8681         watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST);
8682
8683         // Watchtower Bob should have broadcast a commitment/HTLC-timeout
8684         let bob_state_y;
8685         {
8686                 let mut txn = bob_broadcaster.txn_broadcast();
8687                 assert_eq!(txn.len(), 2);
8688                 bob_state_y = txn.remove(0);
8689         };
8690
8691         // We confirm Bob's state Y on Alice, and she should broadcast an HTLC-timeout
8692         let height = HTLC_TIMEOUT_BROADCAST + 1;
8693         connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
8694         check_closed_broadcast(&nodes[0], 1, true);
8695         check_closed_event!(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false,
8696                 [nodes[1].node.get_our_node_id()], 100000);
8697         watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
8698         check_added_monitors(&nodes[0], 1);
8699         {
8700                 let htlc_txn = alice_broadcaster.txn_broadcast();
8701                 assert_eq!(htlc_txn.len(), 1);
8702                 check_spends!(htlc_txn[0], bob_state_y);
8703         }
8704 }
8705
8706 #[test]
8707 fn test_pre_lockin_no_chan_closed_update() {
8708         // Test that if a peer closes a channel in response to a funding_created message we don't
8709         // generate a channel update (as the channel cannot appear on chain without a funding_signed
8710         // message).
8711         //
8712         // Doing so would imply a channel monitor update before the initial channel monitor
8713         // registration, violating our API guarantees.
8714         //
8715         // Previously, full_stack_target managed to hit this case by opening then closing a channel,
8716         // then opening a second channel with the same funding output as the first (which is not
8717         // rejected because the first channel does not exist in the ChannelManager) and closing it
8718         // before receiving funding_signed.
8719         let chanmon_cfgs = create_chanmon_cfgs(2);
8720         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8721         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8722         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8723
8724         // Create an initial channel
8725         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
8726         let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8727         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
8728         let accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
8729         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
8730
8731         // Move the first channel through the funding flow...
8732         let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
8733
8734         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
8735         check_added_monitors!(nodes[0], 0);
8736
8737         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
8738         let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
8739         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
8740         assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
8741         check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
8742                 [nodes[1].node.get_our_node_id()], 100000);
8743 }
8744
8745 #[test]
8746 fn test_htlc_no_detection() {
8747         // This test is a mutation to underscore the detection logic bug we had
8748         // before #653. The HTLC value routed is above the remaining balance, thus
8749         // inverting the HTLC and `to_remote` outputs. The HTLC will come second and
8750         // wouldn't be seen by the pre-#653 detection as we were enumerate()'ing
8751         // over a watched-outputs vector (Vec<TxOut>), thus implicitly relying on
8752         // output-order detection for correct filtering of spending children.
8753
8754         let chanmon_cfgs = create_chanmon_cfgs(2);
8755         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8756         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8757         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8758
8759         // Create some initial channels
8760         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8761
8762         send_payment(&nodes[0], &vec!(&nodes[1])[..], 1_000_000);
8763         let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
8764         let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
8765         assert_eq!(local_txn[0].input.len(), 1);
8766         assert_eq!(local_txn[0].output.len(), 3);
8767         check_spends!(local_txn[0], chan_1.3);
8768
8769         // Time out the HTLC on A's chain so that A can generate an HTLC-Timeout tx
8770         let block = create_dummy_block(nodes[0].best_block_hash(), 42, vec![local_txn[0].clone()]);
8771         connect_block(&nodes[0], &block);
8772         // We deliberately connect the local tx twice, as this should provoke a failure when
8773         // running this test without the #653 fix.
8774         chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &block, nodes[0].best_block_info().1 + 1);
8775         check_closed_broadcast!(nodes[0], true);
8776         check_added_monitors!(nodes[0], 1);
8777         check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
8778         connect_blocks(&nodes[0], TEST_FINAL_CLTV);
8779
8780         let htlc_timeout = {
8781                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8782                 assert_eq!(node_txn.len(), 1);
8783                 assert_eq!(node_txn[0].input.len(), 1);
8784                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
8785                 check_spends!(node_txn[0], local_txn[0]);
8786                 node_txn[0].clone()
8787         };
8788
8789         connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![htlc_timeout.clone()]));
8790         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
8791         expect_payment_failed!(nodes[0], our_payment_hash, false);
8792 }
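
// Hedged sketch of the order-independent matching the #653 fix moved towards: decide
// whether a transaction spends a watched output by comparing explicit outpoints rather
// than by relying on the enumeration order of a watched-output vector. Illustrative only,
// not the ChannelMonitor's actual detection code.
#[allow(dead_code)]
fn spends_watched_outpoint_sketch(tx: &Transaction, watched: &[BitcoinOutPoint]) -> bool {
        tx.input.iter().any(|input| watched.contains(&input.previous_output))
}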
8793
8794 fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain_before_fulfill: bool) {
8795         // If we route an HTLC, then learn the HTLC's preimage after the upstream channel has been
8796         // force-closed, we must claim that HTLC on-chain. (Given an HTLC forwarded from Alice --> Bob -->
8797         // Carol, Alice would be the upstream node, and Carol the downstream.)
8798         //
8799         // Steps of the test:
8800         // 1) Alice sends a HTLC to Carol through Bob.
8801         // 2) Carol doesn't settle the HTLC.
8802         // 3) If broadcast_alice is true, Alice force-closes her channel with Bob. Else Bob force closes.
8803         // Steps 4 and 5 may be reordered depending on go_onchain_before_fulfill.
8804         // 4) Bob sees Alice's commitment on his chain or vice versa. An offered output is present
8805         //    but can't be claimed as Bob doesn't yet have knowledge of the preimage.
8806         // 5) Carol releases the preimage to Bob off-chain.
8807         // 6) Bob claims the offered output on the broadcasted commitment.
8808         let chanmon_cfgs = create_chanmon_cfgs(3);
8809         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
8810         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
8811         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
8812
8813         // Create some initial channels
8814         let chan_ab = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
8815         create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 10001);
8816
8817         // Steps (1) and (2):
8818         // Send an HTLC Alice --> Bob --> Carol, but Carol doesn't settle the HTLC back.
8819         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
8820
8821         // Check that Alice's commitment transaction now contains an output for this HTLC.
8822         let alice_txn = get_local_commitment_txn!(nodes[0], chan_ab.2);
8823         check_spends!(alice_txn[0], chan_ab.3);
8824         assert_eq!(alice_txn[0].output.len(), 2);
8825         check_spends!(alice_txn[1], alice_txn[0]); // 2nd transaction is a non-final HTLC-timeout
8826         assert_eq!(alice_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
8827         assert_eq!(alice_txn.len(), 2);
8828
8829         // Steps (3) and (4):
8830         // If `go_onchain_before_fulfill`, broadcast the relevant commitment transaction and check that Bob
8831         // responds by (1) broadcasting a channel update and (2) adding a new ChannelMonitor.
8832         let mut force_closing_node = 0; // Alice force-closes
8833         let mut counterparty_node = 1; // Bob if Alice force-closes
8834
8835         // Bob force-closes
8836         if !broadcast_alice {
8837                 force_closing_node = 1;
8838                 counterparty_node = 0;
8839         }
8840         nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
8841         check_closed_broadcast!(nodes[force_closing_node], true);
8842         check_added_monitors!(nodes[force_closing_node], 1);
8843         check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed, [nodes[counterparty_node].node.get_our_node_id()], 100000);
8844         if go_onchain_before_fulfill {
8845                 let txn_to_broadcast = match broadcast_alice {
8846                         true => alice_txn.clone(),
8847                         false => get_local_commitment_txn!(nodes[1], chan_ab.2)
8848                 };
8849                 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
8850                 if broadcast_alice {
8851                         check_closed_broadcast!(nodes[1], true);
8852                         check_added_monitors!(nodes[1], 1);
8853                         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
8854                 }
8855         }
8856
8857         // Step (5):
8858         // Carol then claims the funds and sends an update_fulfill message to Bob, and they go through the
8859         // process of removing the HTLC from their commitment transactions.
8860         nodes[2].node.claim_funds(payment_preimage);
8861         check_added_monitors!(nodes[2], 1);
8862         expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
8863
8864         let carol_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
8865         assert!(carol_updates.update_add_htlcs.is_empty());
8866         assert!(carol_updates.update_fail_htlcs.is_empty());
8867         assert!(carol_updates.update_fail_malformed_htlcs.is_empty());
8868         assert!(carol_updates.update_fee.is_none());
8869         assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
8870
8871         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
8872         let went_onchain = go_onchain_before_fulfill || force_closing_node == 1;
8873         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if went_onchain { None } else { Some(1000) }, went_onchain, false);
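             // Bob's PaymentForwarded event reports his 1000 msat forwarding fee only if the claim
             // settled entirely off-chain; once the upstream channel has gone on-chain, the fee is
             // reported as None since it can no longer be cleanly attributed.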
8874         // If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
8875         if !go_onchain_before_fulfill && broadcast_alice {
8876                 let events = nodes[1].node.get_and_clear_pending_msg_events();
8877                 assert_eq!(events.len(), 1);
8878                 match events[0] {
8879                         MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
8880                                 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
8881                         },
8882                         _ => panic!("Unexpected event"),
8883                 };
8884         }
8885         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &carol_updates.commitment_signed);
8886         // One monitor update for the preimage to update the Bob<->Alice channel, and one monitor
8887         // update for Carol<->Bob's updated commitment transaction info.
8888         check_added_monitors!(nodes[1], 2);
8889
8890         let events = nodes[1].node.get_and_clear_pending_msg_events();
8891         assert_eq!(events.len(), 2);
8892         let bob_revocation = match events[0] {
8893                 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
8894                         assert_eq!(*node_id, nodes[2].node.get_our_node_id());
8895                         (*msg).clone()
8896                 },
8897                 _ => panic!("Unexpected event"),
8898         };
8899         let bob_updates = match events[1] {
8900                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
8901                         assert_eq!(*node_id, nodes[2].node.get_our_node_id());
8902                         (*updates).clone()
8903                 },
8904                 _ => panic!("Unexpected event"),
8905         };
8906
8907         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revocation);
8908         check_added_monitors!(nodes[2], 1);
8909         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_updates.commitment_signed);
8910         check_added_monitors!(nodes[2], 1);
8911
8912         let events = nodes[2].node.get_and_clear_pending_msg_events();
8913         assert_eq!(events.len(), 1);
8914         let carol_revocation = match events[0] {
8915                 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
8916                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
8917                         (*msg).clone()
8918                 },
8919                 _ => panic!("Unexpected event"),
8920         };
8921         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &carol_revocation);
8922         check_added_monitors!(nodes[1], 1);
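             // At this point the update_fulfill has completed a full commitment_signed/revoke_and_ack
             // round trip in both directions, so the HTLC is fully removed from the Carol<->Bob
             // channel state.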
8923
8924         // If this test requires the force-closed channel to not be on-chain until after the fulfill,
8925         // here's where we put said channel's commitment tx on-chain.
8926         let mut txn_to_broadcast = alice_txn.clone();
8927         if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); }
8928         if !go_onchain_before_fulfill {
8929                 connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
8930                 // If Bob was the one to force-close, he will have already passed these checks earlier.
8931                 if broadcast_alice {
8932                         check_closed_broadcast!(nodes[1], true);
8933                         check_added_monitors!(nodes[1], 1);
8934                         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
8935                 }
8936                 let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
8937                 if broadcast_alice {
8938                         assert_eq!(bob_txn.len(), 1);
8939                         check_spends!(bob_txn[0], txn_to_broadcast[0]);
8940                 } else {
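                             // Bob force-closed here, so his broadcast queue already holds his own
                             // commitment transaction. Connect styles which update the best block
                             // first additionally appear to make the monitor rebroadcast that
                             // commitment, hence the duplicate-txid check below.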
8941                         if nodes[1].connect_style.borrow().updates_best_block_first() {
8942                                 assert_eq!(bob_txn.len(), 3);
8943                                 assert_eq!(bob_txn[0].txid(), bob_txn[1].txid());
8944                         } else {
8945                                 assert_eq!(bob_txn.len(), 2);
8946                         }
8947                         check_spends!(bob_txn[0], chan_ab.3);
8948                 }
8949         }
8950
8951         // Step (6):
8952         // Finally, check that Bob broadcasted a preimage-claiming transaction for the HTLC output on the
8953         // broadcasted commitment transaction.
8954         {
8955                 let script_weight = match broadcast_alice {
8956                         true => OFFERED_HTLC_SCRIPT_WEIGHT,
8957                         false => ACCEPTED_HTLC_SCRIPT_WEIGHT
8958                 };
8959                 // If Alice force-closed, Bob only broadcasts a HTLC-output-claiming transaction. Otherwise,
8960                 // Bob force-closed and broadcasts the commitment transaction along with a
8961                 // HTLC-output-claiming transaction.
8962                 let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
8963                 if broadcast_alice {
8964                         assert_eq!(bob_txn.len(), 1);
8965                         check_spends!(bob_txn[0], txn_to_broadcast[0]);
8966                         assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
8967                 } else {
8968                         assert_eq!(bob_txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 3 } else { 2 });
8969                         let htlc_tx = bob_txn.pop().unwrap();
8970                         check_spends!(htlc_tx, txn_to_broadcast[0]);
8971                         assert_eq!(htlc_tx.input[0].witness.last().unwrap().len(), script_weight);
8972                 }
8973         }
8974 }
8975
8976 #[test]
8977 fn test_onchain_htlc_settlement_after_close() {
8978         do_test_onchain_htlc_settlement_after_close(true, true);
8979         do_test_onchain_htlc_settlement_after_close(false, true); // Technically redundant, but may as well
8980         do_test_onchain_htlc_settlement_after_close(true, false);
8981         do_test_onchain_htlc_settlement_after_close(false, false);
8982 }
8983
8984 #[test]
8985 fn test_duplicate_temporary_channel_id_from_different_peers() {
8986         // Tests that we can accept two different `OpenChannel` requests with the same
8987         // `temporary_channel_id`, as long as they are from different peers.
8988         let chanmon_cfgs = create_chanmon_cfgs(3);
8989         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
8990         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
8991         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
8992
8993         // Create the first channel
8994         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
8995         let mut open_chan_msg_chan_1_0 = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
8996
8997         // Create a second channel
8998         nodes[2].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap();
8999         let mut open_chan_msg_chan_2_0 = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
9000
9001         // Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same
9002         // `temporary_channel_id` as the `OpenChannel` from nodes[1] to nodes[0].
9003         open_chan_msg_chan_2_0.common_fields.temporary_channel_id = open_chan_msg_chan_1_0.common_fields.temporary_channel_id;
9004
9005         // Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same
9006         // `temporary_channel_id` as they are from different peers.
9007         nodes[0].node.handle_open_channel(&nodes[1].node.get_our_node_id(), &open_chan_msg_chan_1_0);
9008         {
9009                 let events = nodes[0].node.get_and_clear_pending_msg_events();
9010                 assert_eq!(events.len(), 1);
9011                 match &events[0] {
9012                         MessageSendEvent::SendAcceptChannel { node_id, msg } => {
9013                                 assert_eq!(node_id, &nodes[1].node.get_our_node_id());
9014                                 assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
9015                         },
9016                         _ => panic!("Unexpected event"),
9017                 }
9018         }
9019
9020         nodes[0].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg_chan_2_0);
9021         {
9022                 let events = nodes[0].node.get_and_clear_pending_msg_events();
9023                 assert_eq!(events.len(), 1);
9024                 match &events[0] {
9025                         MessageSendEvent::SendAcceptChannel { node_id, msg } => {
9026                                 assert_eq!(node_id, &nodes[2].node.get_our_node_id());
9027                                 assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
9028                         },
9029                         _ => panic!("Unexpected event"),
9030                 }
9031         }
9032 }
9033
9034 #[test]
9035 fn test_peer_funding_sidechannel() {
9036         // Test that if a peer somehow learns which txid we'll use for our channel funding before we
9037         // receive `funding_transaction_generated`, the peer cannot cause us to crash. We'd previously
9038         // assumed that LDK would receive `funding_transaction_generated` prior to our peer learning
9039         // the txid and panicked if the peer tried to open a redundant channel to us with the same
9040         // funding outpoint.
9041         //
9042         // While this assumption is generally safe, some users may have out-of-band protocols where
9043         // they notify their LSP about a funding outpoint first, or this may be violated in the future
9044         // with collaborative transaction construction protocols, e.g. dual-funding.
9045         let chanmon_cfgs = create_chanmon_cfgs(3);
9046         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9047         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9048         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9049
9050         let temp_chan_id_ab = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
9051         let temp_chan_id_ca = exchange_open_accept_chan(&nodes[2], &nodes[0], 1_000_000, 0);
9052
9053         let (_, tx, funding_output) =
9054                 create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
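             // Hand nodes[0]'s intended funding transaction to nodes[2] for the C<->A channel,
             // emulating a peer which learned the funding txid through a side channel before
             // nodes[0] could use it.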
9055
9056         let cs_funding_events = nodes[2].node.get_and_clear_pending_events();
9057         assert_eq!(cs_funding_events.len(), 1);
9058         match cs_funding_events[0] {
9059                 Event::FundingGenerationReady { .. } => {}
9060                 _ => panic!("Unexpected event {:?}", cs_funding_events),
9061         }
9062
9063         nodes[2].node.funding_transaction_generated_unchecked(&temp_chan_id_ca, &nodes[0].node.get_our_node_id(), tx.clone(), funding_output.index).unwrap();
9064         let funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[0].node.get_our_node_id());
9065         nodes[0].node.handle_funding_created(&nodes[2].node.get_our_node_id(), &funding_created_msg);
9066         get_event_msg!(nodes[0], MessageSendEvent::SendFundingSigned, nodes[2].node.get_our_node_id());
9067         expect_channel_pending_event(&nodes[0], &nodes[2].node.get_our_node_id());
9068         check_added_monitors!(nodes[0], 1);
9069
9070         let res = nodes[0].node.funding_transaction_generated(&temp_chan_id_ab, &nodes[1].node.get_our_node_id(), tx.clone());
9071         let err_msg = format!("{:?}", res.unwrap_err());
9072         assert!(err_msg.contains("An existing channel using outpoint "));
9073         assert!(err_msg.contains(" is open with peer"));
9074         // Even though the last funding_transaction_generated errored, it still generated a
9075         // SendFundingCreated. However, when the peer responds with a funding_signed it will send the
9076         // appropriate error message.
9077         let as_funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9078         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &as_funding_created);
9079         check_added_monitors!(nodes[1], 1);
9080         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9081         let reason = ClosureReason::ProcessingError { err: format!("An existing channel using outpoint {} is open with peer {}", funding_output, nodes[2].node.get_our_node_id()), };
9082         check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(ChannelId::v1_from_funding_outpoint(funding_output), true, reason)]);
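             // nodes[0] thus closes its half-opened A<->B channel locally: the funding outpoint it
             // was told to use is already claimed by the channel which nodes[2] funded above.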
9083
9084         let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9085         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
9086         get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
9087 }
9088
9089 #[test]
9090 fn test_duplicate_conflicting_funding_from_second_peer() {
9091         // Test that if a user tries to fund a channel with a funding outpoint they'd previously used
9092         // we don't try to remove the previous ChannelMonitor. This is largely a test to ensure we
9093         // don't regress in the fuzzer, as such funding getting passed our outpoint-matches checks
9094         // implies the user (and our counterparty) has reused cryptographic keys across channels, which
9095         // we require the user not to do.
9096         let chanmon_cfgs = create_chanmon_cfgs(4);
9097         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9098         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9099         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9100
9101         let temp_chan_id = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
9102
9103         let (_, tx, funding_output) =
9104                 create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
9105
9106         // Now that we have a funding outpoint, create a dummy `ChannelMonitor` and insert it into
9107         // nodes[0]'s ChainMonitor so that the initial `ChannelMonitor` write fails.
9108         let dummy_chan_id = create_chan_between_nodes(&nodes[2], &nodes[3]).3;
9109         let dummy_monitor = get_monitor!(nodes[2], dummy_chan_id).clone();
9110         nodes[0].chain_monitor.chain_monitor.watch_channel(funding_output, dummy_monitor).unwrap();
9111
9112         nodes[0].node.funding_transaction_generated(&temp_chan_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9113
9114         let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9115         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
9116         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9117         check_added_monitors!(nodes[1], 1);
9118         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9119
9120         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
9121         // At this point, the channel should be closed, after having generated one monitor write (the
9122         // watch_channel call which failed), but zero monitor updates.
9123         check_added_monitors!(nodes[0], 1);
9124         get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
9125         let err_reason = ClosureReason::ProcessingError { err: "Channel funding outpoint was a duplicate".to_owned() };
9126         check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(funding_signed_msg.channel_id, true, err_reason)]);
9127 }
9128
9129 #[test]
9130 fn test_duplicate_funding_err_in_funding() {
9131         // Test that if we have a live channel with one peer, then another peer comes along and tries
9132         // to create a second channel with the same txid we'll fail and not overwrite the
9133         // outpoint_to_peer map in `ChannelManager`.
9134         //
9135         // This was previously broken.
9136         let chanmon_cfgs = create_chanmon_cfgs(3);
9137         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9138         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9139         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9140
9141         let (_, _, _, real_channel_id, funding_tx) = create_chan_between_nodes(&nodes[0], &nodes[1]);
9142         let real_chan_funding_txo = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 };
9143         assert_eq!(ChannelId::v1_from_funding_outpoint(real_chan_funding_txo), real_channel_id);
9144
9145         nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
9146         let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9147         let node_c_temp_chan_id = open_chan_msg.common_fields.temporary_channel_id;
9148         open_chan_msg.common_fields.temporary_channel_id = real_channel_id;
9149         nodes[1].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg);
9150         let mut accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[2].node.get_our_node_id());
9151         accept_chan_msg.common_fields.temporary_channel_id = node_c_temp_chan_id;
9152         nodes[2].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
9153
9154         // Now that we have a second channel with the same funding txo, send a bogus funding message
9155         // and let nodes[1] remove the inbound channel.
9156         let (_, funding_tx, _) = create_funding_transaction(&nodes[2], &nodes[1].node.get_our_node_id(), 100_000, 42);
9157
9158         nodes[2].node.funding_transaction_generated(&node_c_temp_chan_id, &nodes[1].node.get_our_node_id(), funding_tx).unwrap();
9159
9160         let mut funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9161         funding_created_msg.temporary_channel_id = real_channel_id;
9162         // Make the signature invalid by changing the funding output
9163         funding_created_msg.funding_output_index += 10;
9164         nodes[1].node.handle_funding_created(&nodes[2].node.get_our_node_id(), &funding_created_msg);
9165         get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
9166         let err = "Invalid funding_created signature from peer".to_owned();
9167         let reason = ClosureReason::ProcessingError { err };
9168         let expected_closing = ExpectedCloseEvent::from_id_reason(real_channel_id, false, reason);
9169         check_closed_events(&nodes[1], &[expected_closing]);
9170
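             // The outpoint_to_peer entry must still map to nodes[0]: the failed duplicate from
             // nodes[2] must not have overwritten the mapping for the live channel.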
9171         assert_eq!(
9172                 *nodes[1].node.outpoint_to_peer.lock().unwrap().get(&real_chan_funding_txo).unwrap(),
9173                 nodes[0].node.get_our_node_id()
9174         );
9175 }
9176
9177 #[test]
9178 fn test_duplicate_chan_id() {
9179         // Test that if a given peer tries to open a channel with the same channel_id as one that is
9180         // already open we reject it and keep the old channel.
9181         //
9182         // Previously, full_stack_target managed to figure out that if you tried to open two channels
9183         // with the same funding output (ie post-funding channel_id), we'd create a monitor update for
9184         // the existing channel when we detect the duplicate new channel, screwing up our monitor
9185         // updating logic for the existing channel.
9186         let chanmon_cfgs = create_chanmon_cfgs(2);
9187         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9188         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9189         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9190
9191         // Create an initial channel
9192         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
9193         let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9194         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
9195         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9196
9197         // Try to create a second channel with the same temporary_channel_id as the first and check
9198         // that it is rejected.
9199         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
9200         {
9201                 let events = nodes[1].node.get_and_clear_pending_msg_events();
9202                 assert_eq!(events.len(), 1);
9203                 match events[0] {
9204                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
9205                                 // Technically, at this point, nodes[1] would be justified in thinking both the
9206                                 // first (valid) and second (invalid) channels are closed, given they both have
9207                                 // the same temporary channel_id. However, currently we do not, so we just
9208                                 // move forward with it.
9209                                 assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
9210                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
9211                         },
9212                         _ => panic!("Unexpected event"),
9213                 }
9214         }
9215
9216         // Move the first channel through the funding flow...
9217         let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
9218
9219         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9220         check_added_monitors!(nodes[0], 0);
9221
9222         let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9223         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
9224         {
9225                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
9226                 assert_eq!(added_monitors.len(), 1);
9227                 assert_eq!(added_monitors[0].0, funding_output);
9228                 added_monitors.clear();
9229         }
9230         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9231
9232         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9233
9234         let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
9235         let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
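             // Per BOLT 2, the v1 channel_id is the funding txid with its last two bytes XORed with
             // the funding output index, which is what `v1_from_funding_outpoint` computes here.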
9236
9237         // Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a
9238         // temporary one).
9239
9240         // First try to open a second channel with a temporary channel id equal to the txid-based one.
9241         // Technically this is allowed by the spec, but we don't support it and there's little reason
9242         // to. Still, it shouldn't cause any other issues.
9243         open_chan_msg.common_fields.temporary_channel_id = channel_id;
9244         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
9245         {
9246                 let events = nodes[1].node.get_and_clear_pending_msg_events();
9247                 assert_eq!(events.len(), 1);
9248                 match events[0] {
9249                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
9250                                 // Technically, at this point, nodes[1] would be justified in thinking both
9251                                 // channels are closed, but currently we do not, so we just move forward with it.
9252                                 assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
9253                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
9254                         },
9255                         _ => panic!("Unexpected event"),
9256                 }
9257         }
9258
9259         // Now try to create a second channel which has a duplicate funding output.
9260         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
9261         let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9262         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_2_msg);
9263         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9264         create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event
9265
9266         let funding_created = {
9267                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
9268                 let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
9269                 // Once we call `get_funding_created` the channel has the same channel_id as
9270                 // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
9271                 // try to create another channel. Instead, we drop the channel entirely here, leaving the
9272                 // ChannelManager in a possibly-inconsistent state.
9273                 match a_peer_state.channel_by_id.remove(&open_chan_2_msg.common_fields.temporary_channel_id).unwrap() {
9274                         ChannelPhase::UnfundedOutboundV1(mut chan) => {
9275                                 let logger = test_utils::TestLogger::new();
9276                                 chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap()
9277                         },
9278                         _ => panic!("Unexpected ChannelPhase variant"),
9279                 }.unwrap()
9280         };
9281         check_added_monitors!(nodes[0], 0);
9282         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
9283         // At this point we'll look up if the channel_id is present and immediately fail the channel
9284         // without trying to persist the `ChannelMonitor`.
9285         check_added_monitors!(nodes[1], 0);
9286
9287         check_closed_events(&nodes[1], &[
9288                 ExpectedCloseEvent::from_id_reason(funding_created.temporary_channel_id, false, ClosureReason::ProcessingError {
9289                         err: "Already had channel with the new channel_id".to_owned()
9290                 })
9291         ]);
9292
9293         // ...still, nodes[1] will reject the duplicate channel.
9294         {
9295                 let events = nodes[1].node.get_and_clear_pending_msg_events();
9296                 assert_eq!(events.len(), 1);
9297                 match events[0] {
9298                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
9299                                 // Technically, at this point, nodes[1] would be justified in thinking both
9300                                 // channels are closed, but currently we do not, so we just move forward with it.
9301                                 assert_eq!(msg.channel_id, channel_id);
9302                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
9303                         },
9304                         _ => panic!("Unexpected event"),
9305                 }
9306         }
9307
9308         // Finally, finish creating the original channel and send a payment over it to make sure
9309         // everything is functional.
9310         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
9311         {
9312                 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
9313                 assert_eq!(added_monitors.len(), 1);
9314                 assert_eq!(added_monitors[0].0, funding_output);
9315                 added_monitors.clear();
9316         }
9317         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9318
9319         let events_4 = nodes[0].node.get_and_clear_pending_events();
9320         assert_eq!(events_4.len(), 0);
9321         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
9322         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
9323
9324         let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
9325         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
9326         update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
9327
9328         send_payment(&nodes[0], &[&nodes[1]], 8000000);
9329 }
9330
9331 #[test]
9332 fn test_error_chans_closed() {
9333         // Test that we properly handle error messages, closing appropriate channels.
9334         //
9335         // Prior to #787 we'd allow a peer to make us force-close a channel we had with a different
9336         // peer. The "real" fix for that is to index channels with peers_ids, however in the mean time
9337         // we can test various edge cases around it to ensure we don't regress.
9338         let chanmon_cfgs = create_chanmon_cfgs(3);
9339         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9340         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9341         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9342
9343         // Create some initial channels
9344         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9345         let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9346         let chan_3 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001);
9347
9348         assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
9349         assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
9350         assert_eq!(nodes[2].node.list_usable_channels().len(), 1);
9351
9352         // Closing a channel from a different peer has no effect
9353         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() });
9354         assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
9355
9356         // Closing one channel doesn't impact others
9357         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
9358         check_added_monitors!(nodes[0], 1);
9359         check_closed_broadcast!(nodes[0], false);
9360         check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
9361                 [nodes[1].node.get_our_node_id()], 100000);
9362         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
9363         assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
9364         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
9365         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2);
9366
9367         // An all-zero channel ID should close all channels with the sending peer
9368         let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9369         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: ChannelId::new_zero(), data: "ERR".to_owned() });
9370         check_added_monitors!(nodes[0], 2);
9371         check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
9372                 [nodes[1].node.get_our_node_id(); 2], 100000);
9373         let events = nodes[0].node.get_and_clear_pending_msg_events();
9374         assert_eq!(events.len(), 2);
9375         match events[0] {
9376                 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
9377                         assert_eq!(msg.contents.flags & 2, 2);
9378                 },
9379                 _ => panic!("Unexpected event"),
9380         }
9381         match events[1] {
9382                 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
9383                         assert_eq!(msg.contents.flags & 2, 2);
9384                 },
9385                 _ => panic!("Unexpected event"),
9386         }
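             // In each broadcast channel_update, bit 1 of the flags field (mask 2) marks the channel
             // as disabled, which is what we expect for channels we have just force-closed.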
9387         // Note that at this point users of a standard PeerHandler will end up calling
9388         // peer_disconnected.
9389         assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
9390         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
9391
9392         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
9393         assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
9394         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
9395 }
9396
9397 #[test]
9398 fn test_invalid_funding_tx() {
9399         // Test that we properly handle invalid funding transactions sent to us from a peer.
9400         //
9401         // Previously, all other major lightning implementations had failed to properly sanitize
9402         // funding transactions from their counterparties, leading to a multi-implementation critical
9403         // security vulnerability (though we always sanitized properly, we've previously had
9404         // un-released crashes in the sanitization process).
9405         //
9406         // Further, if the funding transaction is consensus-valid, confirms, and is later spent, we'd
9407         // previously have crashed in `ChannelMonitor` even though we closed the channel as bogus and
9408         // gave up on it. We test this here by generating such a transaction.
9409         let chanmon_cfgs = create_chanmon_cfgs(2);
9410         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9411         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9412         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9413
9414         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 10_000, 42, None, None).unwrap();
9415         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
9416         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9417
9418         let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
9419
9420         // Create a witness program which can be spent by a 4-empty-stack-elements witness and which is
9421         // 136 bytes long. This matches our "accepted HTLC preimage spend" matching, previously causing
9422         // a panic as we'd try to extract a 32 byte preimage from a witness element without checking
9423         // its length.
9424         let wit_program: Vec<u8> = channelmonitor::deliberately_bogus_accepted_htlc_witness_program();
9425         let wit_program_script: ScriptBuf = wit_program.into();
9426         for output in tx.output.iter_mut() {
9427                 // Make the confirmed funding transaction have a bogus script_pubkey
9428                 output.script_pubkey = ScriptBuf::new_v0_p2wsh(&wit_program_script.wscript_hash());
9429         }
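             // The funding output now pays to a P2WSH of the bogus witness program rather than the
             // 2-of-2 multisig nodes[1] expects, so the transaction will fail nodes[1]'s funding
             // sanity checks on confirmation.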
9430
9431         nodes[0].node.funding_transaction_generated_unchecked(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone(), 0).unwrap();
9432         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
9433         check_added_monitors!(nodes[1], 1);
9434         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9435
9436         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
9437         check_added_monitors!(nodes[0], 1);
9438         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9439
9440         let events_1 = nodes[0].node.get_and_clear_pending_events();
9441         assert_eq!(events_1.len(), 0);
9442
9443         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
9444         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
9445         nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
9446
9447         let expected_err = "funding tx had wrong script/value or output index";
9448         confirm_transaction_at(&nodes[1], &tx, 1);
9449         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() },
9450                 [nodes[0].node.get_our_node_id()], 100000);
9451         check_added_monitors!(nodes[1], 1);
9452         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
9453         assert_eq!(events_2.len(), 1);
9454         if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] {
9455                 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
9456                 if let msgs::ErrorAction::DisconnectPeer { msg } = action {
9457                         assert_eq!(msg.as_ref().unwrap().data, "Channel closed because of an exception: ".to_owned() + expected_err);
9458                 } else { panic!(); }
9459         } else { panic!(); }
9460         assert_eq!(nodes[1].node.list_channels().len(), 0);
9461
9462         // Now confirm a spend of the (bogus) funding transaction. Previously, if the witness was 5
9463         // elements long, the ChannelMonitor would try to read 32 bytes from the second-to-last element,
9464         // panicking as it's not 32 bytes long.
9465         let mut spend_tx = Transaction {
9466                 version: 2i32, lock_time: LockTime::ZERO,
9467                 input: tx.output.iter().enumerate().map(|(idx, _)| TxIn {
9468                         previous_output: BitcoinOutPoint {
9469                                 txid: tx.txid(),
9470                                 vout: idx as u32,
9471                         },
9472                         script_sig: ScriptBuf::new(),
9473                         sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
9474                         witness: Witness::from_slice(&channelmonitor::deliberately_bogus_accepted_htlc_witness())
9475                 }).collect(),
9476                 output: vec![TxOut {
9477                         value: 1000,
9478                         script_pubkey: ScriptBuf::new(),
9479                 }]
9480         };
9481         check_spends!(spend_tx, tx);
9482         mine_transaction(&nodes[1], &spend_tx);
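             // Reaching this point without panicking is the test: the monitor must tolerate witness
             // elements which aren't 32 bytes long when scanning confirmed spends for HTLC preimages.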
9483 }
9484
9485 #[test]
9486 fn test_coinbase_funding_tx() {
9487         // Miners are able to fund channels directly from coinbase transactions; however,
9488         // by consensus rules, outputs of a coinbase transaction are encumbered by a 100-block
9489         // maturity timelock. To ensure that a (non-0conf) channel like this is enforceable
9490         // on-chain, the minimum depth is updated to 100 blocks for coinbase funding transactions.
9491         //
9492         // Note that 0conf channels with coinbase funding transactions are unaffected and are
9493         // immediately operational after opening.
9494         let chanmon_cfgs = create_chanmon_cfgs(2);
9495         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9496         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9497         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9498
9499         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
9500         let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9501
9502         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
9503         let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9504
9505         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
9506
9507         // Create the coinbase funding transaction.
9508         let (temporary_channel_id, tx, _) = create_coinbase_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
9509
9510         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9511         check_added_monitors!(nodes[0], 0);
9512         let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9513
9514         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
9515         check_added_monitors!(nodes[1], 1);
9516         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9517
9518         let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9519
9520         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
9521         check_added_monitors!(nodes[0], 1);
9522
9523         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9524         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
9525
9526         // Starting at height 0, we "confirm" the coinbase at height 1.
9527         confirm_transaction_at(&nodes[0], &tx, 1);
9528         // We connect 98 more blocks to have 99 confirmations for the coinbase transaction.
9529         connect_blocks(&nodes[0], COINBASE_MATURITY - 2);
9530         // Check that we have no pending message events (we have not queued a `channel_ready` yet).
9531         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
9532         // Now connect one more block which results in 100 confirmations of the coinbase transaction.
9533         connect_blocks(&nodes[0], 1);
9534         // There should now be a `channel_ready` which can be handled.
9535         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(&nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
9536
9537         confirm_transaction_at(&nodes[1], &tx, 1);
9538         connect_blocks(&nodes[1], COINBASE_MATURITY - 2);
9539         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
9540         connect_blocks(&nodes[1], 1);
9541         expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
9542         create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
9543 }
9544
9545 fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock: bool) {
9546         // In the first version of the chain::Confirm interface, after a refactor was made to not
9547         // broadcast CSV-locked transactions until their CSV lock is up, we wouldn't reliably broadcast
9548         // transactions after a `transactions_confirmed` call. Specifically, if the chain, provided via
9549         // `best_block_updated` is at height N, and a transaction output which we wish to spend at
9550         // height N-1 (due to a CSV to height N-1) is provided at height N, we will not broadcast the
9551         // spending transaction until height N+1 (or greater). This was due to the way
9552         // `ChannelMonitor::transactions_confirmed` worked, only checking if we should broadcast a
9553         // spending transaction at the height the input transaction was confirmed at, not whether we
9554         // should broadcast a spending transaction at the current height.
9555         // A second, similar, issue involved failing HTLCs backwards - because we only provided the
9556         // height at which transactions were confirmed to `OnchainTx::update_claims_view`, it wasn't
9557         // aware that the anti-reorg-delay had, in fact, already expired, waiting to fail-backwards
9558         // until we learned about an additional block.
9559         //
9560         // As an additional check, if `test_height_before_timelock` is set, we instead test that we
9561         // aren't broadcasting transactions too early (ie not broadcasting them at all).
9562         let chanmon_cfgs = create_chanmon_cfgs(3);
9563         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9564         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9565         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9566         *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
9567
9568         create_announced_chan_between_nodes(&nodes, 0, 1);
9569         let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
9570         let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
9571         nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
9572         nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
9573
9574         nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
9575         check_closed_broadcast!(nodes[1], true);
9576         check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
9577         check_added_monitors!(nodes[1], 1);
9578         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
9579         assert_eq!(node_txn.len(), 1);
9580
9581         let conf_height = nodes[1].best_block_info().1;
9582         if !test_height_before_timelock {
9583                 connect_blocks(&nodes[1], 24 * 6);
9584         }
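             // Tell the monitor the commitment transaction confirmed back at conf_height even
             // though, in the !test_height_before_timelock case, the best block is already 24 * 6
             // blocks later. This mimics a chain source which reports an old confirmation only after
             // the tip has moved on.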
9585         nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
9586                 &nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height);
9587         if test_height_before_timelock {
9588                 // If we confirmed the close transaction, but timelocks have not yet expired, we should not
9589                 // generate any events or broadcast any transactions
9590                 assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
9591                 assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
9592         } else {
9593                 // We should broadcast an HTLC transaction spending our funding transaction first
9594                 let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
9595                 assert_eq!(spending_txn.len(), 2);
9596                 let htlc_tx = if spending_txn[0].txid() == node_txn[0].txid() {
9597                         &spending_txn[1]
9598                 } else {
9599                         &spending_txn[0]
9600                 };
9601                 check_spends!(htlc_tx, node_txn[0]);
9602                 // We should also generate a SpendableOutputs event with the to_self output (as its
9603                 // timelock is up).
9604                 let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
9605                 assert_eq!(descriptor_spend_txn.len(), 1);
9606
9607                 // If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we
9608                 // should immediately fail-backwards the HTLC to the previous hop, without waiting for an
9609                 // additional block built on top of the current chain.
9610                 nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
9611                         &nodes[1].get_block_header(conf_height + 1), &[(0, htlc_tx)], conf_height + 1);
9612                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
9613                 check_added_monitors!(nodes[1], 1);
9614
9615                 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9616                 assert!(updates.update_add_htlcs.is_empty());
9617                 assert!(updates.update_fulfill_htlcs.is_empty());
9618                 assert_eq!(updates.update_fail_htlcs.len(), 1);
9619                 assert!(updates.update_fail_malformed_htlcs.is_empty());
9620                 assert!(updates.update_fee.is_none());
9621                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
9622                 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
9623                 expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_announce.contents.short_channel_id, true);
9624         }
9625 }
9626
9627 #[test]
9628 fn test_tx_confirmed_skipping_blocks_immediate_broadcast() {
9629         do_test_tx_confirmed_skipping_blocks_immediate_broadcast(false);
9630         do_test_tx_confirmed_skipping_blocks_immediate_broadcast(true);
9631 }
9632
9633 fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
9634         let chanmon_cfgs = create_chanmon_cfgs(2);
9635         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9636         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9637         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9638
9639         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
9640
9641         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
9642                 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
9643         let route = get_route!(nodes[0], payment_params, 10_000).unwrap();
9644
9645         let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]);
9646
9647         {
9648                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
9649                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
9650                 check_added_monitors!(nodes[0], 1);
9651                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9652                 assert_eq!(events.len(), 1);
9653                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
9654                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9655                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
9656         }
9657         expect_pending_htlcs_forwardable!(nodes[1]);
9658         expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 10_000);
9659
9660         {
9661                 // Note that we use a different PaymentId here to allow us to duplicatively pay
9662                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
9663                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_secret.0)).unwrap();
9664                 check_added_monitors!(nodes[0], 1);
9665                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9666                 assert_eq!(events.len(), 1);
9667                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
9668                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9669                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
9670                 // At this point, nodes[1] would notice it has too much value for the payment. It will
9671                 // assume the second is a privacy attack (no longer particularly relevant
9672                 // post-payment_secrets) and fail back the new HTLC. Previously, it'd also have failed back
9673                 // the first HTLC delivered above.
9674         }
9675
9676         expect_pending_htlcs_forwardable_ignore!(nodes[1]);
9677         nodes[1].node.process_pending_htlc_forwards();
9678
9679         if test_for_second_fail_panic {
9680                 // Now we go fail back the first HTLC from the user end.
9681                 nodes[1].node.fail_htlc_backwards(&our_payment_hash);
9682
9683                 let expected_destinations = vec![
9684                         HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
9685                         HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
9686                 ];
9687                 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations);
9688                 nodes[1].node.process_pending_htlc_forwards();
9689
9690                 check_added_monitors!(nodes[1], 1);
9691                 let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9692                 assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2);
9693
9694                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9695                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]);
9696                 commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
9697
9698                 let failure_events = nodes[0].node.get_and_clear_pending_events();
9699                 assert_eq!(failure_events.len(), 4);
9700                 if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); }
9701                 if let Event::PaymentFailed { .. } = failure_events[1] {} else { panic!(); }
9702                 if let Event::PaymentPathFailed { .. } = failure_events[2] {} else { panic!(); }
9703                 if let Event::PaymentFailed { .. } = failure_events[3] {} else { panic!(); }
9704         } else {
9705                 // Let the second HTLC fail and claim the first
9706                 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
9707                 nodes[1].node.process_pending_htlc_forwards();
9708
9709                 check_added_monitors!(nodes[1], 1);
9710                 let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9711                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9712                 commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
9713
9714                 expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new());
9715
9716                 claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
9717         }
9718 }
9719
9720 #[test]
9721 fn test_dup_htlc_second_fail_panic() {
9722         // Previously, if we received two HTLCs back-to-back, where the second overran the expected
9723         // value for the payment, we'd fail back both HTLCs after generating a `PaymentClaimable` event.
9724         // Then, if the user failed the second payment, they'd hit a "tried to fail an already failed
9725         // HTLC" debug panic. This tests for this behavior, checking that only one HTLC is auto-failed.
9726         do_test_dup_htlc_second_rejected(true);
9727 }
9728
9729 #[test]
9730 fn test_dup_htlc_second_rejected() {
9731         // Test that if we receive a second HTLC for an MPP payment that overruns the payment amount we
9732         // simply reject the second HTLC but are still able to claim the first HTLC.
9733         do_test_dup_htlc_second_rejected(false);
9734 }
9735
9736 #[test]
9737 fn test_inconsistent_mpp_params() {
9738         // Test that if we receive two HTLCs with different payment parameters we fail back the first
9739         // such HTLC and allow the second to stay.
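        // Topology note: nodes[0] can reach nodes[3] over two two-hop paths (via nodes[1]
        // and via nodes[2]), so the 15,000,000 msat payment below is split into two MPP parts.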
9740         let chanmon_cfgs = create_chanmon_cfgs(4);
9741         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9742         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9743         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9744
9745         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
9746         create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
9747         create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
9748         let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);
9749
9750         let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
9751                 .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap();
9752         let mut route = get_route!(nodes[0], payment_params, 15_000_000).unwrap();
9753         assert_eq!(route.paths.len(), 2);
9754         route.paths.sort_by(|path_a, _| {
9755                 // Sort the path so that the path through nodes[1] comes first
9756                 if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
9757                         core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
9758         });
9759
9760         let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]);
9761
9762         let cur_height = nodes[0].best_block_info().1;
9763         let payment_id = PaymentId([42; 32]);
9764
9765         let session_privs = {
9766                 // We create a fake route here so that we start with three pending HTLCs, which we'll
9767                 // ultimately have, just not right away.
9768                 let mut dup_route = route.clone();
9769                 dup_route.paths.push(route.paths[1].clone());
9770                 nodes[0].node.test_add_new_pending_payment(our_payment_hash,
9771                         RecipientOnionFields::secret_only(our_payment_secret), payment_id, &dup_route).unwrap()
9772         };
9773         nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash,
9774                 RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id,
9775                 &None, session_privs[0]).unwrap();
9776         check_added_monitors!(nodes[0], 1);
9777
9778         {
9779                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9780                 assert_eq!(events.len(), 1);
9781                 pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), false, None);
9782         }
9783         assert!(nodes[3].node.get_and_clear_pending_events().is_empty());
9784
9785         nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash,
9786                 RecipientOnionFields::secret_only(our_payment_secret), 14_000_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
9787         check_added_monitors!(nodes[0], 1);
9788
9789         {
9790                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9791                 assert_eq!(events.len(), 1);
9792                 let payment_event = SendEvent::from_event(events.pop().unwrap());
9793
9794                 nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9795                 commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false);
9796
9797                 expect_pending_htlcs_forwardable!(nodes[2]);
9798                 check_added_monitors!(nodes[2], 1);
9799
9800                 let mut events = nodes[2].node.get_and_clear_pending_msg_events();
9801                 assert_eq!(events.len(), 1);
9802                 let payment_event = SendEvent::from_event(events.pop().unwrap());
9803
9804                 nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
9805                 check_added_monitors!(nodes[3], 0);
9806                 commitment_signed_dance!(nodes[3], nodes[2], payment_event.commitment_msg, true, true);
9807
9808                 // At this point, nodes[3] should notice the two HTLCs don't contain the same total payment
9809                 // amount. It will assume the second is a privacy attack (no longer particularly relevant
9810                 // post-payment_secrets) and fail back the new HTLC.
9811         }
9812         expect_pending_htlcs_forwardable_ignore!(nodes[3]);
9813         nodes[3].node.process_pending_htlc_forwards();
9814         expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
9815         nodes[3].node.process_pending_htlc_forwards();
9816
9817         check_added_monitors!(nodes[3], 1);
9818
9819         let fail_updates_1 = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
9820         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
9821         commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
9822
9823         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]);
9824         check_added_monitors!(nodes[2], 1);
9825
9826         let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
9827         nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_updates_2.update_fail_htlcs[0]);
9828         commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false);
9829
9830         expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
9831
9832         nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash,
9833                 RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id,
9834                 &None, session_privs[2]).unwrap();
9835         check_added_monitors!(nodes[0], 1);
9836
9837         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9838         assert_eq!(events.len(), 1);
9839         pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);
9840
9841         do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage);
9842         expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true, true);
9843 }
9844
9845 #[test]
9846 fn test_double_partial_claim() {
9847         // Test what happens if a node receives a payment, generates a PaymentClaimable event, the HTLCs
9848         // time out, the sender resends only some of the MPP parts, then the user processes the
9849         // PaymentClaimable event, ensuring they don't inadvertently claim only part of the full payment
9850         // amount.
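        // Topology: a diamond of four nodes in which nodes[0] reaches nodes[3] via both
        // nodes[1] and nodes[2], giving the sender two MPP parts to (partially) retry.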
9851         let chanmon_cfgs = create_chanmon_cfgs(4);
9852         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
9853         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
9854         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
9855
9856         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
9857         create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
9858         create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
9859         create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);
9860
9861         let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000);
9862         assert_eq!(route.paths.len(), 2);
9863         route.paths.sort_by(|path_a, _| {
9864                 // Sort the path so that the path through nodes[1] comes first
9865                 if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
9866                         core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
9867         });
9868
9869         send_along_route_with_secret(&nodes[0], route.clone(), &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 15_000_000, payment_hash, payment_secret);
9870         // nodes[3] has now received a PaymentClaimable event...which it will take some (exorbitant)
9871         // amount of time to respond to.
9872
9873         // Connect some blocks to time out the payment
9874         connect_blocks(&nodes[3], TEST_FINAL_CLTV);
9875         connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later
9876
9877         let failed_destinations = vec![
9878                 HTLCDestination::FailedPayment { payment_hash },
9879                 HTLCDestination::FailedPayment { payment_hash },
9880         ];
9881         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations);
9882
9883         pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash, PaymentFailureReason::RecipientRejected);
9884
9885         // nodes[0] now retries one of the two paths...
9886         nodes[0].node.send_payment_with_route(&route, payment_hash,
9887                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
9888         check_added_monitors!(nodes[0], 2);
9889
9890         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9891         assert_eq!(events.len(), 2);
9892         let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
9893         pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, false, None);
9894
9895         // At this point nodes[3] has received one half of the payment, and the user goes to handle
9896         // that PaymentClaimable event they got hours ago and never handled...we should refuse to claim.
9897         nodes[3].node.claim_funds(payment_preimage);
9898         check_added_monitors!(nodes[3], 0);
9899         assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
9900 }
9901
9902 /// The possible events which may trigger a `max_dust_htlc_exposure` breach
9903 #[derive(Clone, Copy, PartialEq)]
9904 enum ExposureEvent {
9905         /// Breach occurs at HTLC forwarding (see `send_htlc`)
9906         AtHTLCForward,
9907         /// Breach occurs at HTLC reception (see `update_add_htlc`)
9908         AtHTLCReception,
9909         /// Breach occurs at outbound update_fee (see `send_update_fee`)
9910         AtUpdateFeeOutbound,
9911 }
9912
9913 fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool, multiplier_dust_limit: bool, apply_excess_fee: bool) {
9914         // Test that we properly reject dust HTLC violating our `max_dust_htlc_exposure_msat`
9915         // policy.
9916         //
9917         // At HTLC forward (`send_payment()`), if the sum of the trimmed-to-dust inbound and
9918         // outbound HTLC balances plus this new outbound payment, as included on the next
9919         // counterparty commitment, is above our `max_dust_htlc_exposure_msat`, we'll reject the
9920         // update. At HTLC reception (`update_add_htlc()`), if the same sum including this newly
9921         // received HTLC, as included on the next counterparty commitment, is above our
9922         // `max_dust_htlc_exposure_msat`, we'll fail the update. Note that we return a
9923         // `temporary_channel_failure` (0x1000 | 7), as the channel might be available again for
9924         // HTLC processing once the dust bandwidth has cleared up.
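        //
        // As a minimal illustration of the knob this test exercises (the example values
        // below are arbitrary, not the ones computed for this test): exposure can be
        // capped at a fixed msat amount, or scaled with the feerate, where the limit in
        // msat is the sat/KW rate times the multiplier.
        let _example_fixed = MaxDustHTLCExposure::FixedLimitMsat(5_000_000);
        let _example_multiplier = MaxDustHTLCExposure::FeeRateMultiplier(10_000);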
9925
9926         let chanmon_cfgs = create_chanmon_cfgs(2);
9927         let mut config = test_default_channel_config();
9928
9929         // We hard-code the feerate values here, but they're re-calculated further down and asserted.
9930         // If the values below ever change, these constants should simply be updated.
9931         const AT_FEE_OUTBOUND_HTLCS: u64 = 20;
9932         let nondust_htlc_count_in_limit =
9933                 if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
9934                         AT_FEE_OUTBOUND_HTLCS
9935                 } else { 0 };
9936         let initial_feerate = if apply_excess_fee { 253 * 2 } else { 253 };
9937         let expected_dust_buffer_feerate = initial_feerate + 2530;
9938         let mut commitment_tx_cost = commit_tx_fee_msat(initial_feerate - 253, nondust_htlc_count_in_limit, &ChannelTypeFeatures::empty());
9939         commitment_tx_cost +=
9940                 if on_holder_tx {
9941                         htlc_success_tx_weight(&ChannelTypeFeatures::empty())
9942                 } else {
9943                         htlc_timeout_tx_weight(&ChannelTypeFeatures::empty())
9944                 } * (initial_feerate as u64 - 253) / 1000 * nondust_htlc_count_in_limit;
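        // The above approximates the extra weight-based fees (above the 253 sat/KW floor)
        // that the non-dust HTLCs and their second-stage transactions contribute, since
        // those fees now count towards our dust exposure.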
9945         {
9946                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
9947                 *feerate_lock = initial_feerate;
9948         }
9949         config.channel_config.max_dust_htlc_exposure = if multiplier_dust_limit {
9950                 // Default test fee estimator rate is 253 sat/kw, so we set the multiplier to 5_000_000 / 253
9951                 // to get roughly the same initial value as the default setting when this test was
9952                 // originally written.
9953                 MaxDustHTLCExposure::FeeRateMultiplier((5_000_000 + commitment_tx_cost) / 253)
9954         } else { MaxDustHTLCExposure::FixedLimitMsat(5_000_000 + commitment_tx_cost) };
9955         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9956         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
9957         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9958
9959         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
9960         let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9961         open_channel.common_fields.max_htlc_value_in_flight_msat = 50_000_000;
9962         open_channel.common_fields.max_accepted_htlcs = 60;
9963         if on_holder_tx {
9964                 open_channel.common_fields.dust_limit_satoshis = 546;
9965         }
9966         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
9967         let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9968         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
9969
9970         let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9971
9972         let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
9973
9974         if on_holder_tx {
9975                 let mut node_0_per_peer_lock;
9976                 let mut node_0_peer_state_lock;
9977                 match get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id) {
9978                         ChannelPhase::UnfundedOutboundV1(chan) => {
9979                                 chan.context.holder_dust_limit_satoshis = 546;
9980                         },
9981                         _ => panic!("Unexpected ChannelPhase variant"),
9982                 }
9983         }
9984
9985         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
9986         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
9987         check_added_monitors!(nodes[1], 1);
9988         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
9989
9990         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
9991         check_added_monitors!(nodes[0], 1);
9992         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
9993
9994         let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
9995         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
9996         update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
9997
9998         {
9999                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10000                 *feerate_lock = 253;
10001         }
10002
10003         // Fetch a route in advance, as fetching one will fail once we're unable to send.
10004         let (mut route, payment_hash, _, payment_secret) =
10005                 get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
10006
10007         let (dust_buffer_feerate, max_dust_htlc_exposure_msat) = {
10008                 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
10009                 let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
10010                 let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
10011                 (chan.context().get_dust_buffer_feerate(None) as u64,
10012                 chan.context().get_max_dust_htlc_exposure_msat(253))
10013         };
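        // At the low feerates used in this test the dust buffer feerate is the current
        // feerate plus a fixed 2530 sat/KW of headroom, which the following assertion pins down.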
10014         assert_eq!(dust_buffer_feerate, expected_dust_buffer_feerate as u64);
10015         let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - 1) * 1000;
10016         let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
10017
10018         // Subtract 3 sats for the multiplier and 2 sats for the fixed limit to make sure we are 50% below the dust limit.
10019         // This is to make sure we fully use the dust limit. If we don't, we could end up with `dust_inbound_htlc_on_holder_tx` being 1
10020         // while `max_dust_htlc_exposure_msat` is not equal to `dust_outbound_htlc_on_holder_tx_msat`.
10021         let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000;
10022         let dust_inbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
10023
10024         // This test was written with a fixed dust value here, which we retain, but assert that it is,
10025         // indeed, dust on both transactions.
10026         let dust_htlc_on_counterparty_tx: u64 = 4;
10027         let dust_htlc_on_counterparty_tx_msat: u64 = 1_250_000;
10028         let calcd_dust_htlc_on_counterparty_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000;
10029         assert!(dust_htlc_on_counterparty_tx_msat < dust_inbound_htlc_on_holder_tx_msat);
10030         assert!(dust_htlc_on_counterparty_tx_msat < calcd_dust_htlc_on_counterparty_tx_msat);
10031
10032         if on_holder_tx {
10033                 if dust_outbound_balance {
10034                         // Outbound dust threshold: 2223 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
10035                         // Outbound dust balance: 4372 sats
10036                         // Note that we need the sent payment to be above the outbound dust threshold on the counterparty_tx of 2132 sats
10037                         for _ in 0..dust_outbound_htlc_on_holder_tx {
10038                                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_outbound_htlc_on_holder_tx_msat);
10039                                 nodes[0].node.send_payment_with_route(&route, payment_hash,
10040                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10041                         }
10042                 } else {
10043                         // Inbound dust threshold: 2324 sats (`dust_buffer_feerate` * HTLC_SUCCESS_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
10044                         // Inbound dust balance: 4372 sats
10045                         // Note that we need the sent payment to be above the outbound dust threshold on the counterparty_tx of 2031 sats
10046                         for _ in 0..dust_inbound_htlc_on_holder_tx {
10047                                 route_payment(&nodes[1], &[&nodes[0]], dust_inbound_htlc_on_holder_tx_msat);
10048                         }
10049                 }
10050         } else {
10051                 if dust_outbound_balance {
10052                         // Outbound dust threshold: 2132 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
10053                         // Outbound dust balance: 5000 sats
10054                         for _ in 0..dust_htlc_on_counterparty_tx - 1 {
10055                                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_htlc_on_counterparty_tx_msat);
10056                                 nodes[0].node.send_payment_with_route(&route, payment_hash,
10057                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10058                         }
10059                 } else {
10060                         // Inbound dust threshold: 2031 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
10061                         // Inbound dust balance: 5000 sats
10062                         for _ in 0..dust_htlc_on_counterparty_tx - 1 {
10063                                 route_payment(&nodes[1], &[&nodes[0]], dust_htlc_on_counterparty_tx_msat);
10064                         }
10065                 }
10066         }
10067
10068         if exposure_breach_event == ExposureEvent::AtHTLCForward {
10069                 route.paths[0].hops.last_mut().unwrap().fee_msat =
10070                         if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 };
10071                 // With default dust exposure: 5000 sats
10072                 if on_holder_tx {
10073                         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
10074                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
10075                                 ), true, APIError::ChannelUnavailable { .. }, {});
10076                 } else {
10077                         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
10078                                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
10079                                 ), true, APIError::ChannelUnavailable { .. }, {});
10080                 }
10081         } else if exposure_breach_event == ExposureEvent::AtHTLCReception {
10082                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 4 });
10083                 nodes[1].node.send_payment_with_route(&route, payment_hash,
10084                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10085                 check_added_monitors!(nodes[1], 1);
10086                 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
10087                 assert_eq!(events.len(), 1);
10088                 let payment_event = SendEvent::from_event(events.remove(0));
10089                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
10090                 // With default dust exposure: 5000 sats
10091                 if on_holder_tx {
10092                         // Outbound dust balance: 6399 sats
10093                         let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
10094                         let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
10095                         nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
10096                 } else {
10097                         // Outbound dust balance: 5200 sats
10098                         nodes[0].logger.assert_log("lightning::ln::channel",
10099                                 format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
10100                                         dust_htlc_on_counterparty_tx_msat * dust_htlc_on_counterparty_tx + commitment_tx_cost + 4,
10101                                         max_dust_htlc_exposure_msat), 1);
10102                 }
10103         } else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
10104                 route.paths[0].hops.last_mut().unwrap().fee_msat = 2_500_000;
10105                 // For the multiplier dust exposure limit, since it scales with feerate,
10106                 // we need to add a lot of HTLCs that will become dust at the new feerate
10107                 // to cross the threshold.
10108                 for _ in 0..AT_FEE_OUTBOUND_HTLCS {
10109                         let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(1_000), None);
10110                         nodes[0].node.send_payment_with_route(&route, payment_hash,
10111                                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10112                 }
10113                 {
10114                         let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10115                         *feerate_lock *= 10;
10116                 }
10117                 nodes[0].node.timer_tick_occurred();
10118                 check_added_monitors!(nodes[0], 1);
10119                 nodes[0].logger.assert_log_contains("lightning::ln::channel", "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure", 1);
10120         }
10121
10122         let _ = nodes[0].node.get_and_clear_pending_msg_events();
10123         let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
10124         added_monitors.clear();
10125 }
10126
10127 fn do_test_max_dust_htlc_exposure_by_threshold_type(multiplier_dust_limit: bool, apply_excess_fee: bool) {
10128         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit, apply_excess_fee);
10129         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit, apply_excess_fee);
10130         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit, apply_excess_fee);
10131         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit, apply_excess_fee);
10132         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit, apply_excess_fee);
10133         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit, apply_excess_fee);
10134         do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit, apply_excess_fee);
10135         do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit, apply_excess_fee);
10136         if !multiplier_dust_limit && !apply_excess_fee {
10137                 // Because non-dust HTLC transaction fees are included in the dust exposure, trying to
10138                 // increase the fee to hit a higher dust exposure with a
10139                 // `MaxDustHTLCExposure::FeeRateMultiplier` is no longer super practical, so we skip these
10140                 // in the `multiplier_dust_limit` case.
10141                 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit, apply_excess_fee);
10142                 do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit, apply_excess_fee);
10143                 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit, apply_excess_fee);
10144                 do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit, apply_excess_fee);
10145         }
10146 }
10147
10148 #[test]
10149 fn test_max_dust_htlc_exposure() {
10150         do_test_max_dust_htlc_exposure_by_threshold_type(false, false);
10151         do_test_max_dust_htlc_exposure_by_threshold_type(false, true);
10152         do_test_max_dust_htlc_exposure_by_threshold_type(true, false);
10153         do_test_max_dust_htlc_exposure_by_threshold_type(true, true);
10154 }
10155
10156 #[test]
10157 fn test_nondust_htlc_fees_are_dust() {
10158         // Test that the transaction fees paid in nondust HTLCs count towards our dust limit
10159         let chanmon_cfgs = create_chanmon_cfgs(3);
10160         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
10161
10162         let mut config = test_default_channel_config();
10163         // Set the dust exposure limit to the default value
10164         config.channel_config.max_dust_htlc_exposure =
10165                 MaxDustHTLCExposure::FeeRateMultiplier(10_000);
10166         // Make sure the HTLC limits don't get in the way
10167         config.channel_handshake_limits.min_max_accepted_htlcs = 400;
10168         config.channel_handshake_config.our_max_accepted_htlcs = 400;
10169         config.channel_handshake_config.our_htlc_minimum_msat = 1;
10170
10171         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config), Some(config), Some(config)]);
10172         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
10173
10174         // Create a channel from 1 -> 0 but immediately push all of the funds towards 0
10175         let chan_id_1 = create_announced_chan_between_nodes(&nodes, 1, 0).2;
10176         while nodes[1].node.list_channels()[0].next_outbound_htlc_limit_msat > 0 {
10177                 send_payment(&nodes[1], &[&nodes[0]], nodes[1].node.list_channels()[0].next_outbound_htlc_limit_msat);
10178         }
10179
10180         // First get the channel one HTLC_VALUE HTLC away from the dust limit by sending dust HTLCs
10181         // repeatedly until we run out of space.
10182         const HTLC_VALUE: u64 = 1_000_000; // Doesn't matter, tune until the test passes
10183         let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], HTLC_VALUE).0;
10184
10185         while nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat == 0 {
10186                 route_payment(&nodes[0], &[&nodes[1]], HTLC_VALUE);
10187         }
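        // Once the dust budget is exhausted the channel reports a nonzero
        // `next_outbound_htlc_minimum_msat`: only non-dust HTLCs may be sent from here on.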
10188         assert_ne!(nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat, 0,
10189                 "We don't want to run out of ability to send because of some non-dust limit");
10190         assert!(nodes[0].node.list_channels()[0].pending_outbound_htlcs.len() < 10,
10191                 "We should be able to fill our dust limit without too many HTLCs");
10192
10193         let dust_limit = nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat;
10194         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
10195         assert_ne!(nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat, 0,
10196                 "Make sure we are able to send once we clear one HTLC");
10197
10198         // At this point we have somewhere between dust_limit and dust_limit * 2 left in our dust
10199         // exposure limit, and we want to max that out using non-dust HTLCs.
10200         let commitment_tx_per_htlc_cost =
10201                 htlc_success_tx_weight(&ChannelTypeFeatures::empty()) * 253;
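        // Note that weight (in WU) times feerate (in sat/KW) conveniently yields msat
        // directly: sats = WU * rate / 1000, and msat = sats * 1000.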
10202         let max_htlcs_remaining = dust_limit * 2 / commitment_tx_per_htlc_cost;
10203         assert!(max_htlcs_remaining < 30,
10204                 "We should be able to fill our dust limit without too many HTLCs");
10205         for i in 0..max_htlcs_remaining + 1 {
10206                 assert_ne!(i, max_htlcs_remaining);
10207                 if nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat < dust_limit {
10208                         // We found our limit, and it was less than max_htlcs_remaining!
10209                         // At this point we can only send dust HTLCs as any non-dust HTLCs will overuse our
10210                         // remaining dust exposure.
10211                         break;
10212                 }
10213                 route_payment(&nodes[0], &[&nodes[1]], dust_limit * 2);
10214         }
10215
10216         // At this point non-dust HTLCs are no longer accepted from node 0 -> 1; we also check that
10217         // such HTLCs can't be routed over the same channel.
10218         create_announced_chan_between_nodes(&nodes, 2, 0);
10219         let (route, payment_hash, _, payment_secret) =
10220                 get_route_and_payment_hash!(nodes[2], nodes[1], dust_limit * 2);
10221         let onion = RecipientOnionFields::secret_only(payment_secret);
10222         nodes[2].node.send_payment_with_route(&route, payment_hash, onion, PaymentId([0; 32])).unwrap();
10223         check_added_monitors(&nodes[2], 1);
10224         let send = SendEvent::from_node(&nodes[2]);
10225
10226         nodes[0].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send.msgs[0]);
10227         commitment_signed_dance!(nodes[0], nodes[2], send.commitment_msg, false, true);
10228
10229         expect_pending_htlcs_forwardable!(nodes[0]);
10230         check_added_monitors(&nodes[0], 1);
10231         let node_id_1 = nodes[1].node.get_our_node_id();
10232         expect_htlc_handling_failed_destinations!(
10233                 nodes[0].node.get_and_clear_pending_events(),
10234                 &[HTLCDestination::NextHopChannel { node_id: Some(node_id_1), channel_id: chan_id_1 }]
10235         );
10236
10237         let fail = get_htlc_update_msgs(&nodes[0], &nodes[2].node.get_our_node_id());
10238         nodes[2].node.handle_update_fail_htlc(&nodes[0].node.get_our_node_id(), &fail.update_fail_htlcs[0]);
10239         commitment_signed_dance!(nodes[2], nodes[0], fail.commitment_signed, false);
10240         expect_payment_failed_conditions(&nodes[2], payment_hash, false, PaymentFailedConditions::new());
10241 }
10242
10243
10244 #[test]
10245 fn test_non_final_funding_tx() {
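        // Tests that a funding transaction whose absolute locktime leaves it non-final at
        // the current tip (beyond the +1-block headroom tested below) is rejected and the
        // pending channel is closed.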
10246         let chanmon_cfgs = create_chanmon_cfgs(2);
10247         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10248         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10249         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10250
10251         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10252         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10253         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10254         let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10255         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10256
10257         let best_height = nodes[0].node.best_block.read().unwrap().height;
10258
10259         let chan_id = *nodes[0].network_chan_count.borrow();
10260         let events = nodes[0].node.get_and_clear_pending_events();
10261         let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::ScriptBuf::new(), sequence: Sequence(1), witness: Witness::from_slice(&[&[1]]) };
10262         assert_eq!(events.len(), 1);
10263         let mut tx = match events[0] {
10264                 Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
10265                         // Timelock the transaction _beyond_ the best client height + 1.
10266                         Transaction { version: chan_id as i32, lock_time: LockTime::from_height(best_height + 2).unwrap(), input: vec![input], output: vec![TxOut {
10267                                 value: *channel_value_satoshis, script_pubkey: output_script.clone(),
10268                         }]}
10269                 },
10270                 _ => panic!("Unexpected event"),
10271         };
10272         // Transaction should fail as it's evaluated as non-final for propagation.
10273         match nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()) {
10274                 Err(APIError::APIMisuseError { err }) => {
10275                         assert_eq!(format!("Funding transaction absolute timelock is non-final"), err);
10276                 },
10277                 _ => panic!()
10278         }
10279         let err = "Error in transaction funding: Misuse error: Funding transaction absolute timelock is non-final".to_owned();
10280         check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(temp_channel_id, false, ClosureReason::ProcessingError { err })]);
10281         assert_eq!(get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id()).data, "Failed to fund channel");
10282 }
10283
10284 #[test]
10285 fn test_non_final_funding_tx_within_headroom() {
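        // Counterpart to `test_non_final_funding_tx`: a locktime of best height + 1 falls
        // within the allowed headroom, so the funding transaction is accepted.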
10286         let chanmon_cfgs = create_chanmon_cfgs(2);
10287         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10288         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10289         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10290
10291         let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
10292         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
10293         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
10294         let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
10295         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
10296
10297         let best_height = nodes[0].node.best_block.read().unwrap().height;
10298
10299         let chan_id = *nodes[0].network_chan_count.borrow();
10300         let events = nodes[0].node.get_and_clear_pending_events();
10301         let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::ScriptBuf::new(), sequence: Sequence(1), witness: Witness::from_slice(&[[1]]) };
10302         assert_eq!(events.len(), 1);
10303         let mut tx = match events[0] {
10304                 Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
10305                         // Timelock the transaction within a +1 headroom from the best block.
10306                         Transaction { version: chan_id as i32, lock_time: LockTime::from_consensus(best_height + 1), input: vec![input], output: vec![TxOut {
10307                                 value: *channel_value_satoshis, script_pubkey: output_script.clone(),
10308                         }]}
10309                 },
10310                 _ => panic!("Unexpected event"),
10311         };
10312
10313         // Transaction should be accepted since it's within the +1 headroom from the best block.
10314         assert!(nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
10315         get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
10316 }
10317
10318 #[test]
10319 fn accept_busted_but_better_fee() {
10320         // If a peer sends us a fee update that is too low, but higher than our previous channel
10321         // feerate, we should accept it. In the future we may want to close such channels, but for
10322         // now we only accept the update.
10323         let mut chanmon_cfgs = create_chanmon_cfgs(2);
10324         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10325         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10326         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10327
10328         create_chan_between_nodes(&nodes[0], &nodes[1]);
10329
10330         // Set nodes[1] to expect 5,000 sat/kW.
10331         {
10332                 let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
10333                 *feerate_lock = 5000;
10334         }
10335
10336         // If nodes[0] increases their feerate, even if it's not enough, nodes[1] should accept it.
10337         {
10338                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10339                 *feerate_lock = 1000;
10340         }
10341         nodes[0].node.timer_tick_occurred();
10342         check_added_monitors!(nodes[0], 1);
10343
10344         let events = nodes[0].node.get_and_clear_pending_msg_events();
10345         assert_eq!(events.len(), 1);
10346         match events[0] {
10347                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
10348                         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
10349                         commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
10350                 },
10351                 _ => panic!("Unexpected event"),
10352         };
10353
10354         // If nodes[0] increases their feerate further, even if it's not enough, nodes[1] should
10355         // accept it.
10356         {
10357                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10358                 *feerate_lock = 2000;
10359         }
10360         nodes[0].node.timer_tick_occurred();
10361         check_added_monitors!(nodes[0], 1);
10362
10363         let events = nodes[0].node.get_and_clear_pending_msg_events();
10364         assert_eq!(events.len(), 1);
10365         match events[0] {
10366                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
10367                         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
10368                         commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
10369                 },
10370                 _ => panic!("Unexpected event"),
10371         };
10372
10373         // However, if nodes[0] decreases their feerate, nodes[1] should reject it and close the
10374         // channel.
10375         {
10376                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
10377                 *feerate_lock = 1000;
10378         }
10379         nodes[0].node.timer_tick_occurred();
10380         check_added_monitors!(nodes[0], 1);
10381
10382         let events = nodes[0].node.get_and_clear_pending_msg_events();
10383         assert_eq!(events.len(), 1);
10384         match events[0] {
10385                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
10386                         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
10387                         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError {
10388                                 err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000".to_owned() },
10389                                 [nodes[0].node.get_our_node_id()], 100000);
10390                         check_closed_broadcast!(nodes[1], true);
10391                         check_added_monitors!(nodes[1], 1);
10392                 },
10393                 _ => panic!("Unexpected event"),
10394         };
10395 }
10396
10397 fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash: bool) {
10398         let mut chanmon_cfgs = create_chanmon_cfgs(2);
10399         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10400         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10401         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10402         let min_final_cltv_expiry_delta = 120;
10403         let final_cltv_expiry_delta = if valid_delta { min_final_cltv_expiry_delta + 2 } else {
10404                 min_final_cltv_expiry_delta - 2 };
10405         let recv_value = 100_000;
10406
10407         create_chan_between_nodes(&nodes[0], &nodes[1]);
10408
10409         let payment_parameters = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), final_cltv_expiry_delta as u32);
10410         let (payment_hash, payment_preimage, payment_secret) = if use_user_hash {
10411                 let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1],
10412                         Some(recv_value), Some(min_final_cltv_expiry_delta));
10413                 (payment_hash, payment_preimage, payment_secret)
10414         } else {
10415                 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(recv_value), 7200, Some(min_final_cltv_expiry_delta)).unwrap();
10416                 (payment_hash, nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap(), payment_secret)
10417         };
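        // With `use_user_hash` we registered a hash we generated ourselves; otherwise
        // `create_inbound_payment` derived the preimage, which we fetched back above so we
        // can claim with it later.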
10418         let route = get_route!(nodes[0], payment_parameters, recv_value).unwrap();
10419         nodes[0].node.send_payment_with_route(&route, payment_hash,
10420                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
10421         check_added_monitors!(nodes[0], 1);
10422         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
10423         assert_eq!(events.len(), 1);
10424         let mut payment_event = SendEvent::from_event(events.pop().unwrap());
10425         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
10426         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
10427         expect_pending_htlcs_forwardable!(nodes[1]);
10428
10429         if valid_delta {
10430                 expect_payment_claimable!(nodes[1], payment_hash, payment_secret, recv_value, if use_user_hash {
10431                         None } else { Some(payment_preimage) }, nodes[1].node.get_our_node_id());
10432
10433                 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
10434         } else {
10435                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
10436
10437                 check_added_monitors!(nodes[1], 1);
10438
10439                 let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
10440                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates.update_fail_htlcs[0]);
10441                 commitment_signed_dance!(nodes[0], nodes[1], fail_updates.commitment_signed, false, true);
10442
10443                 expect_payment_failed!(nodes[0], payment_hash, true);
10444         }
10445 }
10446
10447 #[test]
10448 fn test_payment_with_custom_min_cltv_expiry_delta() {
10449         do_payment_with_custom_min_final_cltv_expiry(false, false);
10450         do_payment_with_custom_min_final_cltv_expiry(false, true);
10451         do_payment_with_custom_min_final_cltv_expiry(true, false);
10452         do_payment_with_custom_min_final_cltv_expiry(true, true);
10453 }
10454
10455 #[test]
10456 fn test_disconnects_peer_awaiting_response_ticks() {
10457         // Tests that nodes which are awaiting a response critical to channel responsiveness
10458         // disconnect their counterparty after `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10459         let mut chanmon_cfgs = create_chanmon_cfgs(2);
10460         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
10461         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
10462         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
10463
10464         // Asserts a disconnect event is queued to the user.
10465         let check_disconnect_event = |node: &Node, should_disconnect: bool| {
10466                 let disconnect_event = node.node.get_and_clear_pending_msg_events().iter().find_map(|event|
10467                         if let MessageSendEvent::HandleError { action, .. } = event {
10468                                 if let msgs::ErrorAction::DisconnectPeerWithWarning { .. } = action {
10469                                         Some(())
10470                                 } else {
10471                                         None
10472                                 }
10473                         } else {
10474                                 None
10475                         }
10476                 );
10477                 assert_eq!(disconnect_event.is_some(), should_disconnect);
10478         };
10479
10480         // Fires timer ticks ensuring we only attempt to disconnect peers after reaching
10481         // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
10482         let check_disconnect = |node: &Node| {
10483                 // No disconnect without any timer ticks.
10484                 check_disconnect_event(node, false);
10485
10486                 // No disconnect with 1 timer tick less than required.
10487                 for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS - 1 {
10488                         node.node.timer_tick_occurred();
10489                         check_disconnect_event(node, false);
10490                 }
10491
10492                 // Disconnect after reaching the required ticks.
10493                 node.node.timer_tick_occurred();
10494                 check_disconnect_event(node, true);
10495
10496                 // Disconnect again on the next tick if the peer hasn't been disconnected yet.
10497                 node.node.timer_tick_occurred();
10498                 check_disconnect_event(node, true);
10499         };

	create_chan_between_nodes(&nodes[0], &nodes[1]);

	// We'll start by performing a fee update with Alice (nodes[0]) on the channel.
	*nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2;
	nodes[0].node.timer_tick_occurred();
	check_added_monitors!(&nodes[0], 1);
	let alice_fee_update = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), alice_fee_update.update_fee.as_ref().unwrap());
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &alice_fee_update.commitment_signed);
	check_added_monitors!(&nodes[1], 1);

	// This will prompt Bob (nodes[1]) to respond with his `CommitmentSigned` and `RevokeAndACK`.
	let (bob_revoke_and_ack, bob_commitment_signed) = get_revoke_commit_msgs!(&nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revoke_and_ack);
	check_added_monitors!(&nodes[0], 1);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_commitment_signed);
	check_added_monitors(&nodes[0], 1);

	// Alice then needs to send her final `RevokeAndACK` to complete the commitment dance. We
	// pretend Bob hasn't received the message and check whether he'll disconnect Alice after
	// reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
	let alice_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	check_disconnect(&nodes[1]);

	// Now, we'll reconnect them to test awaiting a `ChannelReestablish` message.
	//
	// Note that since the commitment dance didn't complete above, Alice is expected to resend her
	// final `RevokeAndACK` to Bob to complete it.
	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
	let bob_init = msgs::Init {
		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
	};
	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &bob_init, true).unwrap();
	let alice_init = msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	};
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &alice_init, true).unwrap();

	// Upon reconnection, Alice sends her `ChannelReestablish` to Bob. Alice, however, hasn't
	// received Bob's yet, so she should disconnect him after reaching
	// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
	let alice_channel_reestablish = get_event_msg!(
		nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()
	);
	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &alice_channel_reestablish);
	check_disconnect(&nodes[0]);

	// Bob now sends his `ChannelReestablish` to Alice to resume the channel and consider it "live".
	let bob_channel_reestablish = nodes[1].node.get_and_clear_pending_msg_events().iter().find_map(|event|
		if let MessageSendEvent::SendChannelReestablish { node_id, msg } = event {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
			Some(msg.clone())
		} else {
			None
		}
	).unwrap();
	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bob_channel_reestablish);

	// Sanity check that Alice won't disconnect Bob since she's no longer waiting for any messages.
	for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
		nodes[0].node.timer_tick_occurred();
		check_disconnect_event(&nodes[0], false);
	}

	// However, Bob is still waiting on Alice's `RevokeAndACK`, so he should disconnect her after
	// reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
	check_disconnect(&nodes[1]);

	// Finally, have Bob process the last message.
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &alice_revoke_and_ack);
	check_added_monitors(&nodes[1], 1);

	// At this point, neither node should attempt to disconnect the other, since they aren't
	// waiting on any messages.
	for node in &nodes {
		for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
			node.node.timer_tick_occurred();
			check_disconnect_event(node, false);
		}
	}
}
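
// `timer_tick_occurred` does not fire on its own; users drive it, typically from a
// background task. A minimal sketch of that cadence (binding names hypothetical; the
// documentation suggests roughly one call per minute):
//
//     loop {
//         std::thread::sleep(std::time::Duration::from_secs(60));
//         channel_manager.timer_tick_occurred();
//     }
//
// At that rate, `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS` bounds, in minutes, how long a
// peer may leave a critical response outstanding before we proactively disconnect it.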

#[test]
fn test_remove_expired_outbound_unfunded_channels() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
	let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
	let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);

	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::FundingGenerationReady { .. } => (),
		_ => panic!("Unexpected event"),
	};

	// Asserts whether the outbound channel still exists in nodes[0]'s peer state map.
	let check_outbound_channel_existence = |should_exist: bool| {
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
	};

	// Channel should exist without any timer ticks.
	check_outbound_channel_existence(true);

	// Channel should exist with 1 timer tick less than required.
	for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
		nodes[0].node.timer_tick_occurred();
		check_outbound_channel_existence(true);
	}

	// Remove channel after reaching the required ticks.
	nodes[0].node.timer_tick_occurred();
	check_outbound_channel_existence(false);

	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 1);
	match msg_events[0] {
		MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
			assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
		},
		_ => panic!("Unexpected event"),
	}
	check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
}
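
// The next test mirrors this one with the roles swapped: the *inbound* unfunded channel on
// nodes[1] must likewise be reaped after `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS` timer ticks.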

#[test]
fn test_remove_expired_inbound_unfunded_channels() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
	let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
	let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);

	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::FundingGenerationReady { .. } => (),
		_ => panic!("Unexpected event"),
	};

	// Asserts whether the inbound channel still exists in nodes[1]'s peer state map.
	let check_inbound_channel_existence = |should_exist: bool| {
		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
		let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
		assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
	};

	// Channel should exist without any timer ticks.
	check_inbound_channel_existence(true);

	// Channel should exist with 1 timer tick less than required.
	for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
		nodes[1].node.timer_tick_occurred();
		check_inbound_channel_existence(true);
	}

	// Remove channel after reaching the required ticks.
	nodes[1].node.timer_tick_occurred();
	check_inbound_channel_existence(false);

	let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 1);
	match msg_events[0] {
		MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
			assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
		},
		_ => panic!("Unexpected event"),
	}
	check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
}

#[test]
fn test_channel_close_when_not_timely_accepted() {
	// Create network of two nodes
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Simulate a peer disconnect mid-handshake: the channel is initiated from nodes[0]'s
	// side, but the peers disconnect before nodes[1] can send `accept_channel`.
	let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
	let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

	// Make sure that we have not removed the OutboundV1Channel from nodes[0] immediately.
	assert_eq!(nodes[0].node.list_channels().len(), 1);

	// Since the channel was inbound from nodes[1]'s perspective, it should have been dropped
	// immediately.
	assert_eq!(nodes[1].node.list_channels().len(), 0);

	// In the meantime, some time passes.
	for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
		nodes[0].node.timer_tick_occurred();
	}

	// Since we disconnected from the peer and did not reconnect within the time limit, we
	// should have force-closed the channel by now.
	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
	assert_eq!(nodes[0].node.list_channels().len(), 0);

	{
		// Since the `accept_channel` message was never received, the channel should have been
		// force-closed on nodes[0]'s side by now and the peer removed from `per_peer_state`.
		let node_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		assert_eq!(node_0_per_peer_state.len(), 0);
	}
}
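
// Contrast with the test below: if the peers *do* reconnect before the age limit is hit,
// the outbound side simply re-sends `open_channel` and the handshake resumes.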

#[test]
fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() {
	// Create network of two nodes
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Simulate a peer disconnect mid-handshake: the channel is initiated from nodes[0]'s
	// side, but the peers disconnect before nodes[1] can send `accept_channel`.
	let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
	let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

	// Make sure that we have not removed the OutboundV1Channel from nodes[0] immediately.
	assert_eq!(nodes[0].node.list_channels().len(), 1);

	// Since the channel was inbound from nodes[1]'s perspective, it should have been
	// immediately dropped.
	assert_eq!(nodes[1].node.list_channels().len(), 0);

	// The peers now reconnect.
	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, false).unwrap();

	// Make sure the SendOpenChannel message is added to nodes[0]'s pending message events.
	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 1);
	match &msg_events[0] {
		MessageSendEvent::SendOpenChannel { msg, .. } => assert_eq!(msg, &open_channel_msg),
		_ => panic!("Unexpected message."),
	}
}

fn do_test_multi_post_event_actions(do_reload: bool) {
	// Tests handling multiple post-Event actions at once.
	// There is specific code in ChannelManager to handle channels where multiple post-Event
	// `ChannelMonitorUpdates` are pending at once. This test exercises that code.
	//
	// Specifically, we test calling `get_and_clear_pending_events` while there are two
	// PaymentSents from different channels and one channel has two pending `ChannelMonitorUpdate`s
	// - one from an RAA and one from an inbound commitment_signed.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let (persister, chain_monitor);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes_0_deserialized;
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
	let chan_id_2 = create_announced_chan_between_nodes(&nodes, 0, 2).2;

	send_payment(&nodes[0], &[&nodes[1]], 1_000_000);
	send_payment(&nodes[0], &[&nodes[2]], 1_000_000);

	let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
	let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000);

	nodes[1].node.claim_funds(our_payment_preimage);
	check_added_monitors!(nodes[1], 1);
	expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);

	nodes[2].node.claim_funds(payment_preimage_2);
	check_added_monitors!(nodes[2], 1);
	expect_payment_claimed!(nodes[2], payment_hash_2, 1_000_000);

	for dest in &[1, 2] {
		let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[*dest], nodes[0].node.get_our_node_id());
		nodes[0].node.handle_update_fulfill_htlc(&nodes[*dest].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
		commitment_signed_dance!(nodes[0], nodes[*dest], htlc_fulfill_updates.commitment_signed, false);
		check_added_monitors(&nodes[0], 0);
	}
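
	// Note that `commitment_signed_dance!` performs its own `check_added_monitors` calls
	// internally, so the explicit zero-check above only asserts that nothing beyond the
	// dance itself was persisted.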

	let (route, payment_hash_3, _, payment_secret_3) =
		get_route_and_payment_hash!(nodes[1], nodes[0], 100_000);
	let payment_id = PaymentId(payment_hash_3.0);
	nodes[1].node.send_payment_with_route(&route, payment_hash_3,
		RecipientOnionFields::secret_only(payment_secret_3), payment_id).unwrap();
	check_added_monitors(&nodes[1], 1);

	let send_event = SendEvent::from_node(&nodes[1]);
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event.commitment_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
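
	// With `PaymentSent` events (and their post-event actions) still pending on both
	// channels, nodes[0] withholds its RAA + CS response to nodes[1] until the events are
	// processed below.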

	if do_reload {
		let nodes_0_serialized = nodes[0].node.encode();
		let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
		let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_2).encode();
		reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, chain_monitor, nodes_0_deserialized);

		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
		nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());

		reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
		reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[2]));
	}

	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 4);
	if let Event::PaymentSent { payment_preimage, .. } = events[0] {
		assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
	} else { panic!(); }
	if let Event::PaymentSent { payment_preimage, .. } = events[1] {
		assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
	} else { panic!(); }
	if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
	if let Event::PaymentPathSuccessful { .. } = events[3] {} else { panic!(); }

	// After the events are processed, the ChannelMonitorUpdates will be released and, upon their
	// completion, we'll respond to nodes[1] with an RAA + CS.
	get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
	check_added_monitors(&nodes[0], 3);
}

#[test]
fn test_multi_post_event_actions() {
	do_test_multi_post_event_actions(true);
	do_test_multi_post_event_actions(false);
}

#[test]
fn test_batch_channel_open() {
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	// Initiate channel opening and create the batch channel funding transaction.
	let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
		(&nodes[1], 100_000, 0, 42, None),
		(&nodes[2], 200_000, 0, 43, None),
	]);

	// Go through the funding_created and funding_signed flow with node 1.
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
	check_added_monitors(&nodes[1], 1);
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
	check_added_monitors(&nodes[0], 1);

	// The transaction should not have been broadcast before all channels are ready.
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);

	// Go through the funding_created and funding_signed flow with node 2.
	nodes[2].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[1]);
	check_added_monitors(&nodes[2], 1);
	expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id());

	let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[0].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &funding_signed_msg);
	check_added_monitors(&nodes[0], 1);

	// The transaction should not have been broadcast before the persistence of all monitors
	// has completed.
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
	assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
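
	// Broadcasting is gated on every channel in the batch having a fully persisted
	// monitor: if the transaction hit the wire first and we then crashed, a confirmed
	// funding output could exist with no monitor watching it.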

	// Complete the persistence of the monitor.
	nodes[0].chain_monitor.complete_sole_pending_chan_update(
		&ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 1 })
	);
	let events = nodes[0].node.get_and_clear_pending_events();

	// The transaction should only have been broadcast now.
	let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
	assert_eq!(broadcasted_txs.len(), 1);
	assert_eq!(broadcasted_txs[0], tx);

	assert_eq!(events.len(), 2);
	assert!(events.iter().any(|e| matches!(
		*e,
		crate::events::Event::ChannelPending {
			ref counterparty_node_id,
			..
		} if counterparty_node_id == &nodes[1].node.get_our_node_id(),
	)));
	assert!(events.iter().any(|e| matches!(
		*e,
		crate::events::Event::ChannelPending {
			ref counterparty_node_id,
			..
		} if counterparty_node_id == &nodes[2].node.get_our_node_id(),
	)));
}
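
// A sketch of the user-facing calls which the `create_batch_channel_funding` test helper
// wraps (binding names and the pre-built `funding_tx` here are hypothetical):
//
//     let temp_id_1 = manager.create_channel(node_b_id, 100_000, 0, 42, None, None)?;
//     let temp_id_2 = manager.create_channel(node_c_id, 200_000, 0, 43, None, None)?;
//     // ...exchange open_channel/accept_channel, build a single transaction paying both
//     // funding outputs, then hand it to all channels in the batch at once:
//     manager.batch_funding_transaction_generated(
//         &[(&temp_id_1, &node_b_id), (&temp_id_2, &node_c_id)],
//         funding_tx,
//     )?;
//
// The manager then queues one `funding_created` per channel and, as exercised above,
// defers the broadcast until every channel in the batch is ready.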

#[test]
fn test_close_in_funding_batch() {
	// This test ensures that if one of the channels in the batch closes, the entire batch
	// is closed.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	// Initiate channel opening and create the batch channel funding transaction.
	let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
		(&nodes[1], 100_000, 0, 42, None),
		(&nodes[2], 200_000, 0, 43, None),
	]);

	// Go through the funding_created and funding_signed flow with node 1.
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
	check_added_monitors(&nodes[1], 1);
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
	check_added_monitors(&nodes[0], 1);

	// The transaction should not have been broadcast before all channels are ready.
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);

	// Force-close the channel for which we've completed the initial monitor.
	let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
	let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
	let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
	let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);

	nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();

	// The monitor should become closed.
	check_added_monitors(&nodes[0], 1);
	{
		let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
		let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
		assert_eq!(monitor_updates_1.len(), 1);
		assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
	}

	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
	match msg_events[0] {
		MessageSendEvent::HandleError { .. } => (),
		_ => panic!("Unexpected message."),
	}

	// We broadcast the commitment transaction as part of the force-close.
	{
		let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
		assert_eq!(broadcasted_txs.len(), 1);
		assert!(broadcasted_txs[0].txid() != tx.txid());
		assert_eq!(broadcasted_txs[0].input.len(), 1);
		assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
	}

	// All channels in the batch should close immediately.
	check_closed_events(&nodes[0], &[
		ExpectedCloseEvent {
			channel_id: Some(channel_id_1),
			discard_funding: true,
			channel_funding_txo: Some(funding_txo_1),
			user_channel_id: Some(42),
			..Default::default()
		},
		ExpectedCloseEvent {
			channel_id: Some(channel_id_2),
			discard_funding: true,
			channel_funding_txo: Some(funding_txo_2),
			user_channel_id: Some(43),
			..Default::default()
		},
	]);

	// Ensure the channels don't exist anymore.
	assert!(nodes[0].node.list_channels().is_empty());
}
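
// Closing one channel before the shared funding transaction was broadcast makes that
// transaction unsafe to ever broadcast, so the remaining channels in the batch are
// abandoned as well and the funding is discarded for each of them.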

#[test]
fn test_batch_funding_close_after_funding_signed() {
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	// Initiate channel opening and create the batch channel funding transaction.
	let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
		(&nodes[1], 100_000, 0, 42, None),
		(&nodes[2], 200_000, 0, 43, None),
	]);

	// Go through the funding_created and funding_signed flow with node 1.
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
	check_added_monitors(&nodes[1], 1);
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
	check_added_monitors(&nodes[0], 1);

	// Go through the funding_created and funding_signed flow with node 2.
	nodes[2].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[1]);
	check_added_monitors(&nodes[2], 1);
	expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id());

	let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[0].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &funding_signed_msg);
	check_added_monitors(&nodes[0], 1);

	// The transaction should not have been broadcast before all channels are ready.
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);

	// Force-close the channel for which we've completed the initial monitor.
	let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
	let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
	let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
	let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
	nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
	check_added_monitors(&nodes[0], 2);
	{
		let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
		let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
		assert_eq!(monitor_updates_1.len(), 1);
		assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
		let monitor_updates_2 = monitor_updates.get(&channel_id_2).unwrap();
		assert_eq!(monitor_updates_2.len(), 1);
		assert_eq!(monitor_updates_2[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
	}
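
	// Unlike `test_close_in_funding_batch` above, both channels have completed the
	// `funding_signed` flow here, so force-closing one queues a `CLOSED_CHANNEL_UPDATE_ID`
	// update for each monitor in the batch (hence the two monitor updates just asserted).
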
	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
	match msg_events[0] {
		MessageSendEvent::HandleError { .. } => (),
		_ => panic!("Unexpected message."),
	}

	// We broadcast the commitment transaction as part of the force-close.
	{
		let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
		assert_eq!(broadcasted_txs.len(), 1);
		assert!(broadcasted_txs[0].txid() != tx.txid());
		assert_eq!(broadcasted_txs[0].input.len(), 1);
		assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
	}

	// All channels in the batch should close immediately.
	check_closed_events(&nodes[0], &[
		ExpectedCloseEvent {
			channel_id: Some(channel_id_1),
			discard_funding: true,
			channel_funding_txo: Some(funding_txo_1),
			user_channel_id: Some(42),
			..Default::default()
		},
		ExpectedCloseEvent {
			channel_id: Some(channel_id_2),
			discard_funding: true,
			channel_funding_txo: Some(funding_txo_2),
			user_channel_id: Some(43),
			..Default::default()
		},
	]);

	// Ensure the channels don't exist anymore.
	assert!(nodes[0].node.list_channels().is_empty());
}

fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitment: bool) {
	// Tests that a node will forget the channel (when it only requires 1 confirmation) if the
	// funding and commitment transaction confirm in the same block.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let mut min_depth_1_block_cfg = test_default_channel_config();
	min_depth_1_block_cfg.channel_handshake_config.minimum_depth = 1;
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(min_depth_1_block_cfg), Some(min_depth_1_block_cfg)]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
	let chan_id = ChannelId::v1_from_funding_outpoint(chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 });

	assert_eq!(nodes[0].node.list_channels().len(), 1);
	assert_eq!(nodes[1].node.list_channels().len(), 1);

	let (closing_node, other_node) = if confirm_remote_commitment {
		(&nodes[1], &nodes[0])
	} else {
		(&nodes[0], &nodes[1])
	};

	closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node.node.get_our_node_id()).unwrap();
	let mut msg_events = closing_node.node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 1);
	match msg_events.pop().unwrap() {
		MessageSendEvent::HandleError { action: msgs::ErrorAction::DisconnectPeer { .. }, .. } => {},
		_ => panic!("Unexpected event"),
	}
	check_added_monitors(closing_node, 1);
	check_closed_event(closing_node, 1, ClosureReason::HolderForceClosed, false, &[other_node.node.get_our_node_id()], 1_000_000);

	let commitment_tx = {
		let mut txn = closing_node.tx_broadcaster.txn_broadcast();
		assert_eq!(txn.len(), 1);
		let commitment_tx = txn.pop().unwrap();
		check_spends!(commitment_tx, funding_tx);
		commitment_tx
	};

	mine_transactions(&nodes[0], &[&funding_tx, &commitment_tx]);
	mine_transactions(&nodes[1], &[&funding_tx, &commitment_tx]);

	check_closed_broadcast(other_node, 1, true);
	check_added_monitors(other_node, 1);
	check_closed_event(other_node, 1, ClosureReason::CommitmentTxConfirmed, false, &[closing_node.node.get_our_node_id()], 1_000_000);

	assert!(nodes[0].node.list_channels().is_empty());
	assert!(nodes[1].node.list_channels().is_empty());
}

#[test]
fn test_funding_and_commitment_tx_confirm_same_block() {
	do_test_funding_and_commitment_tx_confirm_same_block(false);
	do_test_funding_and_commitment_tx_confirm_same_block(true);
}

#[test]
fn test_accept_inbound_channel_errors_queued() {
	// For manually accepted inbound channels, tests that a close error during accept is
	// correctly handled and the channel is failed back to the initiator.
	let mut config0 = test_default_channel_config();
	let mut config1 = config0.clone();
	config1.channel_handshake_limits.their_to_self_delay = 1000;
	config1.manually_accept_inbound_channels = true;
	config0.channel_handshake_config.our_to_self_delay = 2000;
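
	// The mismatch above is deliberate: the initiator insists on `our_to_self_delay = 2000`,
	// which exceeds the acceptor's `their_to_self_delay` limit of 1000, so the manual accept
	// below must fail.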

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config0), Some(config1)]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
	let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
	let events = nodes[1].node.get_and_clear_pending_events();
	match events[0] {
		Event::OpenChannelRequest { temporary_channel_id, .. } => {
			match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23) {
				Err(APIError::ChannelUnavailable { err: _ }) => (),
				_ => panic!(),
			}
		}
		_ => panic!("Unexpected event"),
	}
	assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
		open_channel_msg.common_fields.temporary_channel_id);
}